~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/microblaze/kernel/hw_exception_handler.S

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0-only */
  2 /*
  3  * Exception handling for Microblaze
  4  *
  5  * Rewritten interrupt handling
  6  *
  7  * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  8  * Copyright (C) 2008-2009 PetaLogix
  9  *
 10  * uClinux customisation (C) 2005 John Williams
 11  *
 12  * MMU code derived from arch/ppc/kernel/head_4xx.S:
 13  *      Copyright (C) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 14  *              Initial PowerPC version.
 15  *      Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 16  *              Rewritten for PReP
 17  *      Copyright (C) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 18  *              Low-level exception handlers, MMU support, and rewrite.
 19  *      Copyright (C) 1997 Dan Malek <dmalek@jlc.net>
 20  *              PowerPC 8xx modifications.
 21  *      Copyright (C) 1998-1999 TiVo, Inc.
 22  *              PowerPC 403GCX modifications.
 23  *      Copyright (C) 1999 Grant Erickson <grant@lcse.umn.edu>
 24  *              PowerPC 403GCX/405GP modifications.
 25  *      Copyright 2000 MontaVista Software Inc.
 26  *              PPC405 modifications
 27  *      PowerPC 403GCX/405GP modifications.
 28  *              Author: MontaVista Software, Inc.
 29  *              frank_rowand@mvista.com or source@mvista.com
 30  *              debbie_chu@mvista.com
 31  *
 32  * Original code
 33  * Copyright (C) 2004 Xilinx, Inc.
 34  */
 35 
 36 /*
 37  * Here are the handlers which don't require enabling translation
 38  * and calling other kernel code thus we can keep their design very simple
 39  * and do all processing in real mode. All they need is a valid current
 40  * (that is an issue for the CONFIG_REGISTER_TASK_PTR case)
 41  * These handlers use r3,r4,r5,r6 and optionally r[current] to work, therefore
 42  * these registers are saved/restored
 43  * The handlers which require translation are in entry.S --KAA
 44  *
 45  * Microblaze HW Exception Handler
 46  * - Non self-modifying exception handler for the following exception conditions
 47  *   - Unalignment
 48  *   - Instruction bus error
 49  *   - Data bus error
 50  *   - Illegal instruction opcode
 51  *   - Divide-by-zero
 52  *
 53  *   - Privileged instruction exception (MMU)
 54  *   - Data storage exception (MMU)
 55  *   - Instruction storage exception (MMU)
 56  *   - Data TLB miss exception (MMU)
 57  *   - Instruction TLB miss exception (MMU)
 58  *
 59  * Note we disable interrupts during exception handling, otherwise we will
 60  * possibly get multiple re-entrancy if interrupt handlers themselves cause
 61  * exceptions. JW
 62  */
 63 
 64 #include <asm/exceptions.h>
 65 #include <asm/unistd.h>
 66 #include <asm/page.h>
 67 
 68 #include <asm/entry.h>
 69 #include <asm/current.h>
 70 #include <linux/linkage.h>
 71 #include <linux/pgtable.h>
 72 
 73 #include <asm/mmu.h>
 74 #include <asm/signal.h>
 75 #include <asm/registers.h>
 76 #include <asm/asm-offsets.h>
 77 
 78 #undef DEBUG
 79 
  80 /* Helpful Macros */
/* NUM_TO_REG(num) token-pastes a register name, e.g. NUM_TO_REG(5) -> r5 */
  81 #define NUM_TO_REG(num)         r ## num
  82 
/*
 * RESTORE_STATE - undo the register save performed at _hw_exception_handler
 * entry.  r1 points at the save area (it was set to TOPHYS(pt_pool_space) on
 * entry): reload MSR from offset 0, then the clobbered working registers
 * r3-r6, r11 and r31, and finally r1 itself - last, because r1 is the base
 * register for all of these loads.  The nop after "mts rmsr" follows the
 * mts/nop pattern used throughout this file (presumably a write-latency
 * delay for the special register - confirm against the MicroBlaze manual).
 */
  83         #define RESTORE_STATE                   \
  84                 lwi     r5, r1, 0;              \
  85                 mts     rmsr, r5;               \
  86                 nop;                            \
  87                 lwi     r3, r1, PT_R3;          \
  88                 lwi     r4, r1, PT_R4;          \
  89                 lwi     r5, r1, PT_R5;          \
  90                 lwi     r6, r1, PT_R6;          \
  91                 lwi     r11, r1, PT_R11;        \
  92                 lwi     r31, r1, PT_R31;        \
  93                 lwi     r1, r1, PT_R1;
 94 
/*
 * LWREG_NOP / SWREG_NOP - filler entries for the unaligned load/store jump
 * tables (lw_table/sw_table, defined elsewhere in this file) for register
 * numbers whose access is not emulated; the exception is treated as
 * unhandled.
 */
  95 #define LWREG_NOP                       \
  96         bri     ex_handler_unhandled;   \
  97         nop;
  98 
  99 #define SWREG_NOP                       \
 100         bri     ex_handler_unhandled;   \
 101         nop;
102 
/*
 * Unaligned-access emulation helpers, expanded into per-register jump table
 * entries.  The _V ("value") variants access a register's saved copy in
 * memory at offset 4 * regnum (base r1, or r7 for the _VM variants), while
 * the plain variants move to/from the live register via NUM_TO_REG.  The
 * _VM ("virtual mode") variants use brid, so the data access in the second
 * line executes in the branch delay slot.
 */
 103 /* r3 is the source */
 104 #define R3_TO_LWREG_V(regnum)                           \
 105         swi     r3, r1, 4 * regnum;                             \
 106         bri     ex_handler_done;
 107 
 108 /* r3 is the source */
 109 #define R3_TO_LWREG(regnum)                             \
 110         or      NUM_TO_REG (regnum), r0, r3;            \
 111         bri     ex_handler_done;
 112 
 113 /* r3 is the target */
 114 #define SWREG_TO_R3_V(regnum)                           \
 115         lwi     r3, r1, 4 * regnum;                             \
 116         bri     ex_sw_tail;
 117 
 118 /* r3 is the target */
 119 #define SWREG_TO_R3(regnum)                             \
 120         or      r3, r0, NUM_TO_REG (regnum);            \
 121         bri     ex_sw_tail;
 122 
 123         #define R3_TO_LWREG_VM_V(regnum)                \
 124                 brid    ex_lw_end_vm;                   \
 125                 swi     r3, r7, 4 * regnum;
 126 
 127         #define R3_TO_LWREG_VM(regnum)                  \
 128                 brid    ex_lw_end_vm;                   \
 129                 or      NUM_TO_REG (regnum), r0, r3;
 130 
 131         #define SWREG_TO_R3_VM_V(regnum)                \
 132                 brid    ex_sw_tail_vm;                  \
 133                 lwi     r3, r7, 4 * regnum;
 134 
 135         #define SWREG_TO_R3_VM(regnum)                  \
 136                 brid    ex_sw_tail_vm;                  \
 137                 or      r3, r0, NUM_TO_REG (regnum);
138 
 139         /* Shift right instruction depending on available configuration */
 140         #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL == 0
        /* No barrel shifter in hardware: emulate a logical shift right by
         * IMM with chains of single-bit srl instructions.  (srl shifts
         * RIGHT by one; the original ">> n" running totals below were
         * mislabelled "<< n".) */
 141         /* Only the used shift constants defined here - add more if needed */
 142         #define BSRLI2(rD, rA)                          \
 143                 srl rD, rA;             /* >> 1 */      \
 144                 srl rD, rD;             /* >> 2 */
 145         #define BSRLI4(rD, rA)          \
 146                 BSRLI2(rD, rA);         \
 147                 BSRLI2(rD, rD)
 148         #define BSRLI10(rD, rA)                         \
 149                 srl rD, rA;             /* >> 1 */      \
 150                 srl rD, rD;             /* >> 2 */      \
 151                 srl rD, rD;             /* >> 3 */      \
 152                 srl rD, rD;             /* >> 4 */      \
 153                 srl rD, rD;             /* >> 5 */      \
 154                 srl rD, rD;             /* >> 6 */      \
 155                 srl rD, rD;             /* >> 7 */      \
 156                 srl rD, rD;             /* >> 8 */      \
 157                 srl rD, rD;             /* >> 9 */      \
 158                 srl rD, rD              /* >> 10 */
 159         #define BSRLI20(rD, rA)         \
 160                 BSRLI10(rD, rA);        \
 161                 BSRLI10(rD, rD)
 162 
        /* bsrli rD, rA, IMM - assembler-time dispatch to the srl chains
         * above.  Only the shift amounts actually used in this file
         * (2, 10, 12, 14, 20, 24, 28) are supported; anything else is a
         * build error. */
 163         .macro  bsrli, rD, rA, IMM
 164         .if (\IMM) == 2
 165                 BSRLI2(\rD, \rA)
 166         .elseif (\IMM) == 10
 167                 BSRLI10(\rD, \rA)
 168         .elseif (\IMM) == 12
 169                 BSRLI2(\rD, \rA)
 170                 BSRLI10(\rD, \rD)
 171         .elseif (\IMM) == 14
 172                 BSRLI4(\rD, \rA)
 173                 BSRLI10(\rD, \rD)
 174         .elseif (\IMM) == 20
 175                 BSRLI20(\rD, \rA)
 176         .elseif (\IMM) == 24
 177                 BSRLI4(\rD, \rA)
 178                 BSRLI20(\rD, \rD)
 179         .elseif (\IMM) == 28
 180                 BSRLI4(\rD, \rA)
 181                 BSRLI4(\rD, \rD)
 182                 BSRLI20(\rD, \rD)
 183         .else
 184         .error "BSRLI shift macros \IMM"
 185         .endif
 186         .endm
 187         #endif
188 
189 
190 .extern other_exception_handler /* Defined in exception.c */
191 
192 /*
193  * hw_exception_handler - Handler for exceptions
194  *
195  * Exception handler notes:
196  * - Handles all exceptions
197  * - Does not handle unaligned exceptions during load into r17, r1, r0.
198  * - Does not handle unaligned exceptions during store from r17 (cannot be
199  *   done) and r1 (slows down common case)
200  *
201  *  Relevant register structures
202  *
203  *  EAR - |----|----|----|----|----|----|----|----|
204  *      - <  ##   32 bit faulting address     ##  >
205  *
206  *  ESR - |----|----|----|----|----| - | - |-----|-----|
207  *      -                            W   S   REG   EXC
208  *
209  *
210  * STACK FRAME STRUCTURE (for CONFIG_MMU=n)
211  * ----------------------------------------
212  *
213  *      +-------------+         + 0
214  *      |     MSR     |
215  *      +-------------+         + 4
216  *      |     r1      |
217  *      |      .      |
218  *      |      .      |
219  *      |      .      |
220  *      |      .      |
221  *      |     r18     |
222  *      +-------------+         + 76
223  *      |      .      |
224  *      |      .      |
225  *
226  * The MMU kernel uses the same 'pt_pool_space' pointed-to area
227  * for storing register values. The noMMU style was to store the values on
228  * the stack, but in case of failure the register information was lost.
229  * Currently the register values can be seen at a fixed place in memory.
230  * Compared with the previous solution the speed should be the same.
231  *
232  * The MMU exception handler differs from the noMMU kernel's handling.
233  * The exception handler uses a jump table to direct what happens. This
234  * approach is better for the MMU kernel because MMU-related exceptions are
235  * handled by asm code in this file, whereas without the MMU everything
236  * except the unaligned exception is handled by C code.
237  */
238 
239 /*
240  * each of these handlers is entered with R3/4/5/6/11/current saved on the
241  * stack and clobbered, so care should be taken to restore them if someone
242  * is going to return from an exception
243  */
244 
245 /* wrappers to restore state before coming to entry.S */
246 .section .data
247 .align 4
/*
 * pt_pool_space - static pt_regs-sized save area used by
 * _hw_exception_handler instead of the stack (see the file header notes),
 * so the saved register values can be inspected at a fixed memory location
 * after a failure.
 */
248 pt_pool_space:
249         .space  PT_SIZE
250 
251 #ifdef DEBUG
252 /* Create space for exception counting. */
253 .section .data
254 .global exception_debug_table
255 .align 4
256 exception_debug_table:
        /* Matches the exception vector table: 32 exceptions, one word-sized
         * counter each.  The DEBUG code in _hw_exception_handler bumps word
         * 0 as a total count and word EXC*4 as a per-cause count. */
257         /* Look at exception vector table. There is 32 exceptions * word size */
258         .space  (32 * 4)
259 #endif /* DEBUG */
260 
261 .section .rodata
262 .align 4
/*
 * _MB_HW_ExceptionVectorTable - dispatch table for _hw_exception_handler,
 * indexed by ESR[EXC] * 4.  Entries are physical addresses (TOPHYS)
 * because the dispatcher runs with address translation turned off.
 * Unassigned causes fall through to ex_handler_unhandled.
 */
263 _MB_HW_ExceptionVectorTable:
264 /*  0 - Undefined */
265         .long   TOPHYS(ex_handler_unhandled)
266 /*  1 - Unaligned data access exception */
267         .long   TOPHYS(handle_unaligned_ex)
268 /*  2 - Illegal op-code exception */
269         .long   TOPHYS(full_exception_trapw)
270 /*  3 - Instruction bus error exception */
271         .long   TOPHYS(full_exception_trapw)
272 /*  4 - Data bus error exception */
273         .long   TOPHYS(full_exception_trapw)
274 /*  5 - Divide by zero exception */
275         .long   TOPHYS(full_exception_trapw)
276 /*  6 - Floating point unit exception */
277         .long   TOPHYS(full_exception_trapw)
278 /*  7 - Privileged instruction exception */
279         .long   TOPHYS(full_exception_trapw)
280 /*  8 - 15 - Undefined */
281         .long   TOPHYS(ex_handler_unhandled)
282         .long   TOPHYS(ex_handler_unhandled)
283         .long   TOPHYS(ex_handler_unhandled)
284         .long   TOPHYS(ex_handler_unhandled)
285         .long   TOPHYS(ex_handler_unhandled)
286         .long   TOPHYS(ex_handler_unhandled)
287         .long   TOPHYS(ex_handler_unhandled)
288         .long   TOPHYS(ex_handler_unhandled)
289 /* 16 - Data storage exception */
290         .long   TOPHYS(handle_data_storage_exception)
291 /* 17 - Instruction storage exception */
292         .long   TOPHYS(handle_instruction_storage_exception)
293 /* 18 - Data TLB miss exception */
294         .long   TOPHYS(handle_data_tlb_miss_exception)
295 /* 19 - Instruction TLB miss exception */
296         .long   TOPHYS(handle_instruction_tlb_miss_exception)
297 /* 20 - 31 - Undefined */
298         .long   TOPHYS(ex_handler_unhandled)
299         .long   TOPHYS(ex_handler_unhandled)
300         .long   TOPHYS(ex_handler_unhandled)
301         .long   TOPHYS(ex_handler_unhandled)
302         .long   TOPHYS(ex_handler_unhandled)
303         .long   TOPHYS(ex_handler_unhandled)
304         .long   TOPHYS(ex_handler_unhandled)
305         .long   TOPHYS(ex_handler_unhandled)
306         .long   TOPHYS(ex_handler_unhandled)
307         .long   TOPHYS(ex_handler_unhandled)
308         .long   TOPHYS(ex_handler_unhandled)
309         .long   TOPHYS(ex_handler_unhandled)
/*
 * _hw_exception_handler - common entry point for all HW exceptions.
 * Runs in real mode (every memory reference goes through TOPHYS).  Saves
 * the working set (r1, r3-r6, r11, r31) into the static pt_pool_space
 * area, loads 'current' into r31, captures MSR/ESR/EAR, then dispatches
 * through _MB_HW_ExceptionVectorTable indexed by ESR[EXC] * 4.
 * On dispatch: r3 = EAR (faulting address), r4 = ESR, r31 = current.
 */
311 .global _hw_exception_handler
312 .section .text
313 .align 4
314 .ent _hw_exception_handler
315 _hw_exception_handler:
316         swi     r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */
317         /* Save data to kernel memory. Here is the problem
318          * when we came from user space */
319         ori     r1, r0, TOPHYS(pt_pool_space);
320         swi     r3, r1, PT_R3
321         swi     r4, r1, PT_R4
322         swi     r5, r1, PT_R5
323         swi     r6, r1, PT_R6
324 
325         swi     r11, r1, PT_R11
326         swi     r31, r1, PT_R31
327         lwi     r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
328 
        /* Capture the machine state registers; each mfs is followed by a
         * nop per this file's special-register access pattern. */
329         mfs     r5, rmsr;
330         nop
331         swi     r5, r1, 0;
332         mfs     r4, resr
333         nop
334         mfs     r3, rear;
335         nop
336 
337         andi    r5, r4, 0x1F;           /* Extract ESR[EXC] */
338 
339         /* Calculate exception vector offset = r5 << 2 */
340         addk    r6, r5, r5; /* << 1 */
341         addk    r6, r6, r6; /* << 2 */
342 
343 #ifdef DEBUG
344 /* Count exceptions: word 0 is a total count, word EXC*4 a per-cause count */
345         lwi     r5, r0, TOPHYS(exception_debug_table)
346         addi    r5, r5, 1
347         swi     r5, r0, TOPHYS(exception_debug_table)
348         lwi     r5, r6, TOPHYS(exception_debug_table)
349         addi    r5, r5, 1
350         swi     r5, r6, TOPHYS(exception_debug_table)
351 #endif
352 /* end */
353         /* Load the HW Exception vector */
354         lwi     r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
355         bra     r6
356 
/*
 * full_exception_trapw - trampoline for exceptions handled entirely in C:
 * undo the entry-time register save, then branch to full_exception_trap
 * (defined outside this file; see the header note about entry.S).
 */
357 full_exception_trapw:
358         RESTORE_STATE
359         bri     full_exception_trap
360 
361 /* 0x01 - Unaligned data access exception
362  * This occurs when a word access is not aligned on a word boundary,
363  * or when a 16-bit access is not aligned on a 16-bit boundary.
364  * This handler performs the access, and returns, except for MMU when
365  * the unaligned address is last on a 4k page or the physical address is
366  * not found in the page table, in which case unaligned_data_trap is called.
367  */
368 handle_unaligned_ex:
369         /* Working registers already saved: R3, R4, R5, R6
370          *  R4 = ESR
371          *  R3 = EAR
372          */
373         andi    r6, r4, 0x1000                  /* Check ESR[DS] */
374         beqi    r6, _no_delayslot               /* Branch if ESR[DS] not set */
375         mfs     r17, rbtr;      /* ESR[DS] set - return address in BTR */
376         nop
377 _no_delayslot:
378         /* jump to high level unaligned handler */
379         RESTORE_STATE;
380         bri     unaligned_data_trap
381 
/*
 * NOTE(review): the code from here down to ex_sw_end is unreachable by
 * fall-through (the bri above is unconditional and no label intervenes).
 * It looks like a leftover of the old in-place emulation path, entered -
 * if at all - only via the lw_table/sw_table jump tables defined elsewhere
 * in this file.  Confirm before removing.
 */
382         andi    r6, r4, 0x3E0; /* Mask and extract the register operand */
383         srl     r6, r6; /* r6 >> 5 */
384         srl     r6, r6;
385         srl     r6, r6;
386         srl     r6, r6;
387         srl     r6, r6;
388         /* Store the register operand in a temporary location */
389         sbi     r6, r0, TOPHYS(ex_reg_op);
390 
391         andi    r6, r4, 0x400; /* Extract ESR[S] */
392         bnei    r6, ex_sw;
393 ex_lw:
394         andi    r6, r4, 0x800; /* Extract ESR[W] */
395         beqi    r6, ex_lhw;
396         lbui    r5, r3, 0; /* Exception address in r3 */
397         /* Load a word, byte-by-byte from destination address
398                 and save it in tmp space */
399         sbi     r5, r0, TOPHYS(ex_tmp_data_loc_0);
400         lbui    r5, r3, 1;
401         sbi     r5, r0, TOPHYS(ex_tmp_data_loc_1);
402         lbui    r5, r3, 2;
403         sbi     r5, r0, TOPHYS(ex_tmp_data_loc_2);
404         lbui    r5, r3, 3;
405         sbi     r5, r0, TOPHYS(ex_tmp_data_loc_3);
406         /* Get the destination register value into r4 */
407         lwi     r4, r0, TOPHYS(ex_tmp_data_loc_0);
408         bri     ex_lw_tail;
409 ex_lhw:
410         lbui    r5, r3, 0; /* Exception address in r3 */
411         /* Load a half-word, byte-by-byte from destination
412                 address and save it in tmp space */
413         sbi     r5, r0, TOPHYS(ex_tmp_data_loc_0);
414         lbui    r5, r3, 1;
415         sbi     r5, r0, TOPHYS(ex_tmp_data_loc_1);
416         /* Get the destination register value into r4 */
417         lhui    r4, r0, TOPHYS(ex_tmp_data_loc_0);
418 ex_lw_tail:
419         /* Get the destination register number into r5 */
420         lbui    r5, r0, TOPHYS(ex_reg_op);
421         /* Form load_word jump table offset (lw_table + (8 * regnum)) */
422         addik   r6, r0, TOPHYS(lw_table);
423         addk    r5, r5, r5;
424         addk    r5, r5, r5;
425         addk    r5, r5, r5;
426         addk    r5, r5, r6;
427         bra     r5;
428 ex_lw_end: /* Exception handling of load word, ends */
429 ex_sw:
430         /* Get the destination register number into r5 */
431         lbui    r5, r0, TOPHYS(ex_reg_op);
432         /* Form store_word jump table offset (sw_table + (8 * regnum)) */
433         addik   r6, r0, TOPHYS(sw_table);
434         add     r5, r5, r5;
435         add     r5, r5, r5;
436         add     r5, r5, r5;
437         add     r5, r5, r6;
438         bra     r5;
439 ex_sw_tail:
440         mfs     r6, resr;
441         nop
442         andi    r6, r6, 0x800; /* Extract ESR[W] */
443         beqi    r6, ex_shw;
444         /* Full word case - stash r4 in tmp space (old "delay slot" comment
         * was stale: beqi has no delay slot) */
445         swi     r4, r0, TOPHYS(ex_tmp_data_loc_0);
446         /* Store the word, byte-by-byte into destination address */
447         lbui    r4, r0, TOPHYS(ex_tmp_data_loc_0);
448         sbi     r4, r3, 0;
449         lbui    r4, r0, TOPHYS(ex_tmp_data_loc_1);
450         sbi     r4, r3, 1;
451         lbui    r4, r0, TOPHYS(ex_tmp_data_loc_2);
452         sbi     r4, r3, 2;
453         lbui    r4, r0, TOPHYS(ex_tmp_data_loc_3);
454         sbi     r4, r3, 3;
455         bri     ex_handler_done;
456 
457 ex_shw:
458         /* Store the lower half-word, byte-by-byte into destination address */
459         swi     r4, r0, TOPHYS(ex_tmp_data_loc_0);
460         lbui    r4, r0, TOPHYS(ex_tmp_data_loc_2);
461         sbi     r4, r3, 0;
462         lbui    r4, r0, TOPHYS(ex_tmp_data_loc_3);
463         sbi     r4, r3, 1;
464 ex_sw_end: /* Exception handling of store word, ends. */
465 
/* Restore everything and return to the interrupted code; r17 holds the
 * return address (loaded from BTR when the fault was in a delay slot). */
466 ex_handler_done:
467         RESTORE_STATE;
468         rted    r17, 0
469         nop
470 
471         /* Exception vector entry code. This code runs with address translation
472          * turned off (i.e. using physical addresses). */
473 
474         /* Exception vectors. */
475 
476         /* 0x10 - Data Storage Exception
477          * This happens for just a few reasons. U0 set (but we don't do that),
478          * or zone protection fault (user violation, write to protected page).
479          * If this is just an update of modified status, we do that quickly
480          * and exit. Otherwise, we call heavyweight functions to do the work.
481          */
482         handle_data_storage_exception:
483                 /* Working registers already saved: R3, R4, R5, R6
484                  * R3 = EAR (faulting address), R4 = ESR
485                  * (entry code does mfs r3,rear / mfs r4,resr)
486                  */
487                 mfs     r11, rpid
488                 nop
489                 /* If we are faulting a kernel address, we have to use the
490                  * kernel page tables.
491                  */
492                 ori     r5, r0, CONFIG_KERNEL_START
493                 cmpu    r5, r3, r5
494                 bgti    r5, ex3
495                 /* First, check if it was a zone fault (which means a user
496                  * tried to access a kernel or read-protected page - always
497                  * a SEGV). All other faults here must be stores, so no
498                  * need to check ESR_S as well. */
499                 andi    r4, r4, ESR_DIZ         /* ESR_Z - zone protection */
500                 bnei    r4, ex2
501 
502                 ori     r4, r0, swapper_pg_dir
503                 mts     rpid, r0                /* TLB will have 0 TID */
504                 nop
505                 bri     ex4
506 
507                 /* Get the PGD for the current thread. */
508         ex3:
509                 /* First, check if it was a zone fault (which means a user
510                  * tried to access a kernel or read-protected page - always
511                  * a SEGV). All other faults here must be stores, so no
512                  * need to check ESR_S as well. */
513                 andi    r4, r4, ESR_DIZ         /* ESR_Z */
514                 bnei    r4, ex2
515                 /* get current task address */
516                 addi    r4 ,CURRENT_TASK, TOPHYS(0);
517                 lwi     r4, r4, TASK_THREAD+PGDIR
518         ex4:
519                 tophys(r4,r4)
520                 /* Create L1 (pgdir/pmd) address */
521                 bsrli   r5, r3, PGDIR_SHIFT - 2
522                 andi    r5, r5, PAGE_SIZE - 4
523 /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
524                 or      r4, r4, r5
525                 lwi     r4, r4, 0               /* Get L1 entry */
526                 andi    r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
527                 beqi    r5, ex2                 /* Bail if no table */
528 
529                 tophys(r5,r5)
530                 bsrli   r6, r3, PTE_SHIFT /* Compute PTE address */
531                 andi    r6, r6, PAGE_SIZE - 4
532                 or      r5, r5, r6
533                 lwi     r4, r5, 0               /* Get Linux PTE */
534 
535                 andi    r6, r4, _PAGE_RW        /* Is it writeable? */
536                 beqi    r6, ex2                 /* Bail if not */
537 
538                 /* Update 'changed' */
539                 ori     r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
540                 swi     r4, r5, 0               /* Update Linux page table */
541 
542                 /* Most of the Linux PTE is ready to load into the TLB LO.
543                  * We set ZSEL, where only the LS-bit determines user access.
544                  * We set execute, because we don't have the granularity to
545                  * properly set this at the page level (Linux problem).
546                  * If shared is set, we cause a zero PID->TID load.
547                  * Many of these bits are software only. Bits we don't set
548                  * here we (properly should) assume have the appropriate value.
549                  */
550 /* Ignore memory coherent, just LSB on ZSEL is used + EX/WR */
551                 andi    r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
552                                                 TLB_ZSEL(1) | TLB_ATTR_MASK
553                 ori     r4, r4, _PAGE_HWEXEC    /* make it executable */
554 
555                 /* find the TLB index that caused the fault. It has to be here*/
556                 mts     rtlbsx, r3
557                 nop
558                 mfs     r5, rtlbx               /* DEBUG: TBD */
559                 nop
560                 mts     rtlblo, r4              /* Load TLB LO */
561                 nop
562                                                 /* Will sync shadow TLBs */
563 
564                 /* Done...restore registers and get out of here. */
565                 mts     rpid, r11
566                 nop
                /* "bri 4" branches to the next instruction - presumably a
                 * pipeline flush after the rpid write; the same pattern
                 * follows every rpid restore in this file.  Confirm. */
567                 bri 4
568 
569                 RESTORE_STATE;
570                 rted    r17, 0
571                 nop
572         ex2:
573                 /* The bailout. Restore registers to pre-exception conditions
574                  * and call the heavyweights to help us out. */
575                 mts     rpid, r11
576                 nop
577                 bri 4
578                 RESTORE_STATE;
579                 bri     page_fault_data_trap
580 
580 
581         /* 0x11 - Instruction Storage Exception
582          * This is caused by a fetch from non-execute or guarded pages. */
583         handle_instruction_storage_exception:
584                 /* Working registers already saved: R3, R4, R5, R6
585                  * R3 = EAR (faulting address), R4 = ESR
586                  * (entry code does mfs r3,rear / mfs r4,resr)
587                  */
588 
                /* Nothing can be fixed up here in asm - restore the saved
                 * registers and hand straight to the C page-fault code. */
589                 RESTORE_STATE;
590                 bri     page_fault_instr_trap
590 
591         /* 0x12 - Data TLB Miss Exception
592          * As the name implies, translation is not in the MMU, so search the
593          * page tables and fix it. The only purpose of this function is to
594          * load TLB entries from the page table if they exist.
595          */
596         handle_data_tlb_miss_exception:
597                 /* Working registers already saved: R3, R4, R5, R6
598                  * R3 = EAR, R4 = ESR
599                  */
600                 mfs     r11, rpid
601                 nop
602 
603                 /* If we are faulting a kernel address, we have to use the
604                  * kernel page tables. */
605                 ori     r6, r0, CONFIG_KERNEL_START
606                 cmpu    r4, r3, r6
607                 bgti    r4, ex5
608                 ori     r4, r0, swapper_pg_dir
609                 mts     rpid, r0                /* TLB will have 0 TID */
610                 nop
611                 bri     ex6
612 
613                 /* Get the PGD for the current thread. */
614         ex5:
615                 /* get current task address */
616                 addi    r4 ,CURRENT_TASK, TOPHYS(0);
617                 lwi     r4, r4, TASK_THREAD+PGDIR
618         ex6:
619                 tophys(r4,r4)
620                 /* Create L1 (pgdir/pmd) address */
621                 bsrli   r5, r3, PGDIR_SHIFT - 2
622                 andi    r5, r5, PAGE_SIZE - 4
623 /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
624                 or      r4, r4, r5
625                 lwi     r4, r4, 0               /* Get L1 entry */
626                 andi    r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
627                 beqi    r5, ex7                 /* Bail if no table */
628 
629                 tophys(r5,r5)
630                 bsrli   r6, r3, PTE_SHIFT /* Compute PTE address */
631                 andi    r6, r6, PAGE_SIZE - 4
632                 or      r5, r5, r6
633                 lwi     r4, r5, 0               /* Get Linux PTE */
634 
635                 andi    r6, r4, _PAGE_PRESENT
636                 beqi    r6, ex7
637 
638                 ori     r4, r4, _PAGE_ACCESSED
639                 swi     r4, r5, 0
640 
641                 /* Most of the Linux PTE is ready to load into the TLB LO.
642                  * We set ZSEL, where only the LS-bit determines user access.
643                  * We set execute, because we don't have the granularity to
644                  * properly set this at the page level (Linux problem).
645                  * If shared is set, we cause a zero PID->TID load.
646                  * Many of these bits are software only. Bits we don't set
647                  * here we (properly should) assume have the appropriate value.
648                  */
                /* brid: the andi below executes in the branch delay slot,
                 * so r4 is masked before finish_tlb_load runs. */
649                 brid    finish_tlb_load
650                 andi    r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
651                                                 TLB_ZSEL(1) | TLB_ATTR_MASK
652         ex7:
653                 /* The bailout. Restore registers to pre-exception conditions
654                  * and call the heavyweights to help us out.
655                  */
656                 mts     rpid, r11
657                 nop
658                 bri     4
659                 RESTORE_STATE;
660                 bri     page_fault_data_trap
661 
662         /* 0x13 - Instruction TLB Miss Exception
663          * Nearly the same as above, except we get our information from
664          * different registers and bailout to a different point.
665          */
666         handle_instruction_tlb_miss_exception:
667                 /* Working registers already saved: R3, R4, R5, R6
668                  *  R3 = EAR (faulting instruction address), R4 = ESR
669                  *  (entry code does mfs r3,rear / mfs r4,resr)
670                  */
671                 mfs     r11, rpid
672                 nop
673 
674                 /* If we are faulting a kernel address, we have to use the
675                  * kernel page tables.
676                  */
677                 ori     r4, r0, CONFIG_KERNEL_START
678                 cmpu    r4, r3, r4
679                 bgti    r4, ex8
680                 ori     r4, r0, swapper_pg_dir
681                 mts     rpid, r0                /* TLB will have 0 TID */
682                 nop
683                 bri     ex9
684 
685                 /* Get the PGD for the current thread. */
686         ex8:
687                 /* get current task address */
688                 addi    r4 ,CURRENT_TASK, TOPHYS(0);
689                 lwi     r4, r4, TASK_THREAD+PGDIR
690         ex9:
691                 tophys(r4,r4)
692                 /* Create L1 (pgdir/pmd) address */
693                 bsrli   r5, r3, PGDIR_SHIFT - 2
694                 andi    r5, r5, PAGE_SIZE - 4
695 /* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
696                 or      r4, r4, r5
697                 lwi     r4, r4, 0               /* Get L1 entry */
698                 andi    r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
699                 beqi    r5, ex10                /* Bail if no table */
700 
701                 tophys(r5,r5)
702                 bsrli   r6, r3, PTE_SHIFT /* Compute PTE address */
703                 andi    r6, r6, PAGE_SIZE - 4
704                 or      r5, r5, r6
705                 lwi     r4, r5, 0               /* Get Linux PTE */
706 
707                 andi    r6, r4, _PAGE_PRESENT
708                 beqi    r6, ex10
709 
710                 ori     r4, r4, _PAGE_ACCESSED
711                 swi     r4, r5, 0
712 
713                 /* Most of the Linux PTE is ready to load into the TLB LO.
714                  * We set ZSEL, where only the LS-bit determines user access.
715                  * We set execute, because we don't have the granularity to
716                  * properly set this at the page level (Linux problem).
717                  * If shared is set, we cause a zero PID->TID load.
718                  * Many of these bits are software only. Bits we don't set
719                  * here we (properly should) assume have the appropriate value.
720                  */
                /* brid: the andi below executes in the branch delay slot,
                 * so r4 is masked before finish_tlb_load runs. */
721                 brid    finish_tlb_load
722                 andi    r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
723                                                 TLB_ZSEL(1) | TLB_ATTR_MASK
724         ex10:
725                 /* The bailout. Restore registers to pre-exception conditions
726                  * and call the heavyweights to help us out.
727                  */
728                 mts     rpid, r11
729                 nop
730                 bri 4
731                 RESTORE_STATE;
732                 bri     page_fault_instr_trap
732 
733 /* Both the instruction and data TLB miss get to this point to load the TLB.
734  *      r3 - EA of fault
735  *      r4 - TLB LO (info from Linux PTE)
736  *      r5, r6 - available to use
737  *      PID - loaded with proper value when we get here
738  *      Upon exit, we reload everything and RFI.
739  * A common place to load the TLB.
740  */
741 .section .data
742 .align 4
743 .global tlb_skip
        /* First TLB way usable by the round-robin replacement in
         * finish_tlb_load; tlb_index restarts here when it wraps. */
744         tlb_skip:
745                 .long   MICROBLAZE_TLB_SKIP
746         tlb_index:
747                 /* MS: storing last used tlb index */
748                 .long   MICROBLAZE_TLB_SIZE/2
749 .previous
        /* Round-robin TLB victim selection, then program the TLB entry from
         * r4 (TLB LO bits) and r3 (faulting EA -> EPN), and return via RFI. */
750         finish_tlb_load:
751                 /* MS: load the last used TLB index. */
752                 lwi     r5, r0, TOPHYS(tlb_index)
753                 addik   r5, r5, 1 /* MS: inc tlb_index -> use next one */
754 
755 /* MS: FIXME this is potential fault, because this is mask not count */
756                 andi    r5, r5, MICROBLAZE_TLB_SIZE - 1
757                 ori     r6, r0, 1
758                 cmp     r31, r5, r6
759                 blti    r31, ex12               /* index >= 2: no wrap, keep it */
760                 lwi     r5, r0, TOPHYS(tlb_skip) /* wrapped: restart past pinned entries */
761         ex12:
762                 /* MS: save back current TLB index */
763                 swi     r5, r0, TOPHYS(tlb_index)
764 
765                 ori     r4, r4, _PAGE_HWEXEC    /* make it executable */
766                 mts     rtlbx, r5               /* MS: save current TLB */
767                 nop
768                 mts     rtlblo, r4              /* MS: save to TLB LO */
769                 nop
770 
771                 /* Create EPN. This is the faulting address plus a static
772                  * set of bits. These are size, valid, E, U0, and ensure
773                  * bits 20 and 21 are zero.
774                  */
775                 andi    r3, r3, PAGE_MASK
776                 ori     r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
777                 mts     rtlbhi, r3              /* Load TLB HI */
778                 nop
779 
780                 /* Done...restore registers and get out of here. */
781                 mts     rpid, r11               /* restore the PID saved at entry */
782                 nop
783                 bri 4
784                 RESTORE_STATE;
785                 rted    r17, 0
786                 nop
787 
788         /* extern void giveup_fpu(struct task_struct *prev)
789          *
790          * The MicroBlaze processor may have an FPU, so this should not just
791          * return: TBD.
792          */
        /* NOTE(review): the stub below branches-and-links to absolute
         * address 0 - it is a placeholder, not a functional FPU handover. */
793         .globl giveup_fpu;
794         .align 4;
795         giveup_fpu:
796                 bralid  r15,0                   /* TBD */
797                 nop
798 
799         /* At present, this routine just hangs. - extern void abort(void) */
800         .globl abort;
801         .align 4;
802         abort:
803                 br      r0              /* r0 is hardwired 0: PC-relative branch by 0 loops here forever */
804 
        /* void set_context(mm_context_t ctx) - r5 = new context/PID.
         * Writing RPID switches the address space; no explicit TLB flush
         * is needed (see inline comments). */
805         .globl set_context;
806         .align 4;
807         set_context:
808                 mts     rpid, r5        /* Shadow TLBs are automatically */
809                 nop
810                 bri     4               /* flushed by changing PID */
811                 rtsd    r15,8
812                 nop
813 
814 .end _hw_exception_handler
815 
816 /* Unaligned data access exception last on a 4k page for MMU.
817  * When this is called, we are in virtual mode with exceptions enabled
818  * and registers 1-13,15,17,18 saved.
819  *
820  * R3 = ESR
821  * R4 = EAR
822  * R7 = pointer to saved registers (struct pt_regs *regs)
823  *
824  * This handler performs the access byte-by-byte, and returns via
825  * ret_from_exc.
 */
826 .global _unaligned_data_exception
827 .ent _unaligned_data_exception
828 _unaligned_data_exception:
829         andi    r8, r3, 0x3E0;  /* Mask and extract the register operand */
830         bsrli   r8, r8, 2;              /* r8 >> 2 = register operand * 8 */
831         andi    r6, r3, 0x400;  /* Extract ESR[S] */
832         bneid   r6, ex_sw_vm;   /* ESR[S] set -> store, else load */
833         andi    r6, r3, 0x800;  /* Extract ESR[W] - delay slot */
834 ex_lw_vm:
835         beqid   r6, ex_lhw_vm;  /* ESR[W] clear -> half-word load */
836 load1:  lbui    r5, r4, 0;      /* Exception address in r4 - delay slot */
837 /* Load a word, byte-by-byte from destination address and save it in tmp space*/
838         addik   r6, r0, ex_tmp_data_loc_0;
839         sbi     r5, r6, 0;
840 load2:  lbui    r5, r4, 1;
841         sbi     r5, r6, 1;
842 load3:  lbui    r5, r4, 2;
843         sbi     r5, r6, 2;
844 load4:  lbui    r5, r4, 3;
845         sbi     r5, r6, 3;
846         brid    ex_lw_tail_vm;
847 /* Get the destination register value into r3 - delay slot */
848         lwi     r3, r6, 0;
849 ex_lhw_vm:
850         /* Load a half-word, byte-by-byte from destination address and
851          * save it in tmp space.  r5 already holds byte 0 of the operand,
852          * loaded by load1 in the delay slot above. */
852         addik   r6, r0, ex_tmp_data_loc_0;
853         sbi     r5, r6, 0;
854 load5:  lbui    r5, r4, 1;
855         sbi     r5, r6, 1;
856         lhui    r3, r6, 0;      /* Get the destination register value into r3 */
857 ex_lw_tail_vm:
858         /* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
859         addik   r5, r8, lw_table_vm;
860         bra     r5;
861 ex_lw_end_vm:                   /* Exception handling of load word, ends */
862         brai    ret_from_exc;
863 ex_sw_vm:
864 /* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
865         addik   r5, r8, sw_table_vm;
866         bra     r5;
867 ex_sw_tail_vm:
868         addik   r5, r0, ex_tmp_data_loc_0;
869         beqid   r6, ex_shw_vm;  /* ESR[W] clear -> half-word store */
870         swi     r3, r5, 0;      /* Get the word - delay slot */
871         /* Store the word, byte-by-byte into destination address */
872         lbui    r3, r5, 0;
873 store1: sbi     r3, r4, 0;
874         lbui    r3, r5, 1;
875 store2: sbi     r3, r4, 1;
876         lbui    r3, r5, 2;
877 store3: sbi     r3, r4, 2;
878         lbui    r3, r5, 3;
879         brid    ret_from_exc;
880 store4: sbi     r3, r4, 3;      /* Delay slot */
881 ex_shw_vm:
882         /* Store the lower half-word, byte-by-byte into destination address.
         * Which two scratch bytes hold the low half-word depends on the
         * build's endianness, hence the conditional below. */
883 #ifdef __MICROBLAZEEL__
884         lbui    r3, r5, 0;
885 store5: sbi     r3, r4, 0;
886         lbui    r3, r5, 1;
887         brid    ret_from_exc;
888 store6: sbi     r3, r4, 1;      /* Delay slot */
889 #else
890         lbui    r3, r5, 2;
891 store5: sbi     r3, r4, 0;
892         lbui    r3, r5, 3;
893         brid    ret_from_exc;
894 store6: sbi     r3, r4, 1;      /* Delay slot */
895 #endif
896 
897 ex_sw_end_vm:                   /* Exception handling of store word, ends. */
898 
899 /* We have to prevent cases where the get/put_user macros hand an unaligned
900  * pointer into a bad page area.  We must find which original instruction
901  * caused the fault and run the fixup for that instruction, not for the
902  * instruction inside this unaligned handler. */
903 ex_unaligned_fixup:
904         ori     r5, r7, 0 /* setup pointer to pt_regs */
905         lwi     r6, r7, PT_PC; /* faulting address is one instruction above */
906         addik   r6, r6, -4 /* for finding proper fixup */
907         swi     r6, r7, PT_PC; /* and save it back to PT_PC */
908         addik   r7, r0, SIGSEGV /* third argument: signal number */
909         /* call bad_page_fault for finding aligned fixup, fixup address is saved
910          * in PT_PC which is used as return address from exception */
911         addik   r15, r0, ret_from_exc-8 /* setup return address */
912         brid    bad_page_fault
913         nop
914 
915 /* Cover every load/store label above: any of those accesses may fault
 * when the target address lies in a bad page, so each gets a fixup entry. */
916 .section __ex_table,"a";
917         .word   load1,ex_unaligned_fixup;
918         .word   load2,ex_unaligned_fixup;
919         .word   load3,ex_unaligned_fixup;
920         .word   load4,ex_unaligned_fixup;
921         .word   load5,ex_unaligned_fixup;
922         .word   store1,ex_unaligned_fixup;
923         .word   store2,ex_unaligned_fixup;
924         .word   store3,ex_unaligned_fixup;
925         .word   store4,ex_unaligned_fixup;
926         .word   store5,ex_unaligned_fixup;
927         .word   store6,ex_unaligned_fixup;
928 .previous;
929 .end _unaligned_data_exception
930 
931 .global ex_handler_unhandled
932 ex_handler_unhandled:
933 /* FIXME add handle function for unhandled exception - dump register */
934         bri 0           /* PC-relative branch by 0: spin here forever */
935 
936 /*
937  * hw_exception_handler Jump Table
938  * - Contains code snippets for each register that caused the unalign exception
939  * - Hence exception handler is NOT self-modifying
940  * - Separate table for load exceptions and store exceptions.
941  * - Each table is of size: (8 * 32) = 256 bytes
942  */
943 
944 .section .text
945 .align 4
        /* Real-mode load jump table: entry for register N writes the loaded
         * value (r3) into rN.  r1 and r17 use LWREG_NOP instead of a real
         * move - presumably because those registers are managed by the
         * exception save/restore path; see the macro definitions. */
946 lw_table:
947 lw_r0:          R3_TO_LWREG     (0);
948 lw_r1:          LWREG_NOP;
949 lw_r2:          R3_TO_LWREG     (2);
950 lw_r3:          R3_TO_LWREG_V   (3);
951 lw_r4:          R3_TO_LWREG_V   (4);
952 lw_r5:          R3_TO_LWREG_V   (5);
953 lw_r6:          R3_TO_LWREG_V   (6);
954 lw_r7:          R3_TO_LWREG     (7);
955 lw_r8:          R3_TO_LWREG     (8);
956 lw_r9:          R3_TO_LWREG     (9);
957 lw_r10:         R3_TO_LWREG     (10);
958 lw_r11:         R3_TO_LWREG     (11);
959 lw_r12:         R3_TO_LWREG     (12);
960 lw_r13:         R3_TO_LWREG     (13);
961 lw_r14:         R3_TO_LWREG     (14);
962 lw_r15:         R3_TO_LWREG     (15);
963 lw_r16:         R3_TO_LWREG     (16);
964 lw_r17:         LWREG_NOP;
965 lw_r18:         R3_TO_LWREG     (18);
966 lw_r19:         R3_TO_LWREG     (19);
967 lw_r20:         R3_TO_LWREG     (20);
968 lw_r21:         R3_TO_LWREG     (21);
969 lw_r22:         R3_TO_LWREG     (22);
970 lw_r23:         R3_TO_LWREG     (23);
971 lw_r24:         R3_TO_LWREG     (24);
972 lw_r25:         R3_TO_LWREG     (25);
973 lw_r26:         R3_TO_LWREG     (26);
974 lw_r27:         R3_TO_LWREG     (27);
975 lw_r28:         R3_TO_LWREG     (28);
976 lw_r29:         R3_TO_LWREG     (29);
977 lw_r30:         R3_TO_LWREG     (30);
978 lw_r31:         R3_TO_LWREG_V   (31);
979 
        /* Real-mode store jump table: entry for register N copies rN into r3
         * for the byte-wise store path.  r1 and r17 are NOPs, mirroring
         * lw_table. */
980 sw_table:
981 sw_r0:          SWREG_TO_R3     (0);
982 sw_r1:          SWREG_NOP;
983 sw_r2:          SWREG_TO_R3     (2);
984 sw_r3:          SWREG_TO_R3_V   (3);
985 sw_r4:          SWREG_TO_R3_V   (4);
986 sw_r5:          SWREG_TO_R3_V   (5);
987 sw_r6:          SWREG_TO_R3_V   (6);
988 sw_r7:          SWREG_TO_R3     (7);
989 sw_r8:          SWREG_TO_R3     (8);
990 sw_r9:          SWREG_TO_R3     (9);
991 sw_r10:         SWREG_TO_R3     (10);
992 sw_r11:         SWREG_TO_R3     (11);
993 sw_r12:         SWREG_TO_R3     (12);
994 sw_r13:         SWREG_TO_R3     (13);
995 sw_r14:         SWREG_TO_R3     (14);
996 sw_r15:         SWREG_TO_R3     (15);
997 sw_r16:         SWREG_TO_R3     (16);
998 sw_r17:         SWREG_NOP;
999 sw_r18:         SWREG_TO_R3     (18);
1000 sw_r19:         SWREG_TO_R3     (19);
1001 sw_r20:         SWREG_TO_R3     (20);
1002 sw_r21:         SWREG_TO_R3     (21);
1003 sw_r22:         SWREG_TO_R3     (22);
1004 sw_r23:         SWREG_TO_R3     (23);
1005 sw_r24:         SWREG_TO_R3     (24);
1006 sw_r25:         SWREG_TO_R3     (25);
1007 sw_r26:         SWREG_TO_R3     (26);
1008 sw_r27:         SWREG_TO_R3     (27);
1009 sw_r28:         SWREG_TO_R3     (28);
1010 sw_r29:         SWREG_TO_R3     (29);
1011 sw_r30:         SWREG_TO_R3     (30);
1012 sw_r31:         SWREG_TO_R3_V   (31);
1013 
        /* Virtual-mode load jump table, indexed from ex_lw_tail_vm as
         * lw_table_vm + 8*regnum.  All 32 registers have live entries here
         * (unlike lw_table) because the full register set was saved at
         * exception entry. */
1014 lw_table_vm:
1015 lw_r0_vm:       R3_TO_LWREG_VM          (0);
1016 lw_r1_vm:       R3_TO_LWREG_VM_V        (1);
1017 lw_r2_vm:       R3_TO_LWREG_VM_V        (2);
1018 lw_r3_vm:       R3_TO_LWREG_VM_V        (3);
1019 lw_r4_vm:       R3_TO_LWREG_VM_V        (4);
1020 lw_r5_vm:       R3_TO_LWREG_VM_V        (5);
1021 lw_r6_vm:       R3_TO_LWREG_VM_V        (6);
1022 lw_r7_vm:       R3_TO_LWREG_VM_V        (7);
1023 lw_r8_vm:       R3_TO_LWREG_VM_V        (8);
1024 lw_r9_vm:       R3_TO_LWREG_VM_V        (9);
1025 lw_r10_vm:      R3_TO_LWREG_VM_V        (10);
1026 lw_r11_vm:      R3_TO_LWREG_VM_V        (11);
1027 lw_r12_vm:      R3_TO_LWREG_VM_V        (12);
1028 lw_r13_vm:      R3_TO_LWREG_VM_V        (13);
1029 lw_r14_vm:      R3_TO_LWREG_VM_V        (14);
1030 lw_r15_vm:      R3_TO_LWREG_VM_V        (15);
1031 lw_r16_vm:      R3_TO_LWREG_VM_V        (16);
1032 lw_r17_vm:      R3_TO_LWREG_VM_V        (17);
1033 lw_r18_vm:      R3_TO_LWREG_VM_V        (18);
1034 lw_r19_vm:      R3_TO_LWREG_VM_V        (19);
1035 lw_r20_vm:      R3_TO_LWREG_VM_V        (20);
1036 lw_r21_vm:      R3_TO_LWREG_VM_V        (21);
1037 lw_r22_vm:      R3_TO_LWREG_VM_V        (22);
1038 lw_r23_vm:      R3_TO_LWREG_VM_V        (23);
1039 lw_r24_vm:      R3_TO_LWREG_VM_V        (24);
1040 lw_r25_vm:      R3_TO_LWREG_VM_V        (25);
1041 lw_r26_vm:      R3_TO_LWREG_VM_V        (26);
1042 lw_r27_vm:      R3_TO_LWREG_VM_V        (27);
1043 lw_r28_vm:      R3_TO_LWREG_VM_V        (28);
1044 lw_r29_vm:      R3_TO_LWREG_VM_V        (29);
1045 lw_r30_vm:      R3_TO_LWREG_VM_V        (30);
1046 lw_r31_vm:      R3_TO_LWREG_VM_V        (31);
1047 
        /* Virtual-mode store jump table, indexed from ex_sw_vm as
         * sw_table_vm + 8*regnum: entry N fetches rN into r3 before the
         * byte-wise store at ex_sw_tail_vm. */
1048 sw_table_vm:
1049 sw_r0_vm:       SWREG_TO_R3_VM          (0);
1050 sw_r1_vm:       SWREG_TO_R3_VM_V        (1);
1051 sw_r2_vm:       SWREG_TO_R3_VM_V        (2);
1052 sw_r3_vm:       SWREG_TO_R3_VM_V        (3);
1053 sw_r4_vm:       SWREG_TO_R3_VM_V        (4);
1054 sw_r5_vm:       SWREG_TO_R3_VM_V        (5);
1055 sw_r6_vm:       SWREG_TO_R3_VM_V        (6);
1056 sw_r7_vm:       SWREG_TO_R3_VM_V        (7);
1057 sw_r8_vm:       SWREG_TO_R3_VM_V        (8);
1058 sw_r9_vm:       SWREG_TO_R3_VM_V        (9);
1059 sw_r10_vm:      SWREG_TO_R3_VM_V        (10);
1060 sw_r11_vm:      SWREG_TO_R3_VM_V        (11);
1061 sw_r12_vm:      SWREG_TO_R3_VM_V        (12);
1062 sw_r13_vm:      SWREG_TO_R3_VM_V        (13);
1063 sw_r14_vm:      SWREG_TO_R3_VM_V        (14);
1064 sw_r15_vm:      SWREG_TO_R3_VM_V        (15);
1065 sw_r16_vm:      SWREG_TO_R3_VM_V        (16);
1066 sw_r17_vm:      SWREG_TO_R3_VM_V        (17);
1067 sw_r18_vm:      SWREG_TO_R3_VM_V        (18);
1068 sw_r19_vm:      SWREG_TO_R3_VM_V        (19);
1069 sw_r20_vm:      SWREG_TO_R3_VM_V        (20);
1070 sw_r21_vm:      SWREG_TO_R3_VM_V        (21);
1071 sw_r22_vm:      SWREG_TO_R3_VM_V        (22);
1072 sw_r23_vm:      SWREG_TO_R3_VM_V        (23);
1073 sw_r24_vm:      SWREG_TO_R3_VM_V        (24);
1074 sw_r25_vm:      SWREG_TO_R3_VM_V        (25);
1075 sw_r26_vm:      SWREG_TO_R3_VM_V        (26);
1076 sw_r27_vm:      SWREG_TO_R3_VM_V        (27);
1077 sw_r28_vm:      SWREG_TO_R3_VM_V        (28);
1078 sw_r29_vm:      SWREG_TO_R3_VM_V        (29);
1079 sw_r30_vm:      SWREG_TO_R3_VM_V        (30);
1080 sw_r31_vm:      SWREG_TO_R3_VM_V        (31);
1081 
1082 /* Temporary data structures used in the handler.
 * The four ex_tmp_data_loc_* bytes are consecutive and word-aligned
 * (.align 4), so the unaligned handler can assemble an access byte-by-byte
 * at ex_tmp_data_loc_0 and read it back with lwi/lhui. */
1083 .section .data
1084 .align 4
1085 ex_tmp_data_loc_0:
1086         .byte 0
1087 ex_tmp_data_loc_1:
1088         .byte 0
1089 ex_tmp_data_loc_2:
1090         .byte 0
1091 ex_tmp_data_loc_3:
1092         .byte 0
        /* scratch byte; not referenced in this excerpt - TODO confirm use */
1093 ex_reg_op:
1094         .byte 0

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php