
TOMOYO Linux Cross Reference
Linux/arch/xtensa/kernel/entry.S

Version: linux-6.12-rc7


  1 /*                                                
  2  * Low-level exception handling                   
  3  *                                                
  4  * This file is subject to the terms and conditions of the GNU General Public
  5  * License.  See the file "COPYING" in the main directory of this archive
  6  * for more details.                              
  7  *                                                
  8  * Copyright (C) 2004 - 2008 by Tensilica Inc.    
  9  * Copyright (C) 2015 Cadence Design Systems Inc.
 10  *                                                
 11  * Chris Zankel <chris@zankel.net>                 
 12  *                                                
 13  */                                               
 14                                                   
 15 #include <linux/linkage.h>                        
 16 #include <linux/pgtable.h>                        
 17 #include <asm/asm-offsets.h>                      
 18 #include <asm/asmmacro.h>                         
 19 #include <asm/processor.h>                        
 20 #include <asm/coprocessor.h>                      
 21 #include <asm/thread_info.h>                      
 22 #include <asm/asm-uaccess.h>                      
 23 #include <asm/unistd.h>                           
 24 #include <asm/ptrace.h>                           
 25 #include <asm/current.h>                          
 26 #include <asm/page.h>                             
 27 #include <asm/signal.h>                           
 28 #include <asm/tlbflush.h>                         
 29 #include <variant/tie-asm.h>                      
 30                                                   
 31 /*                                                
 32  * Macro to find first bit set in WINDOWBASE from the left + 1
 33  *                                                
 34  * 100....0 -> 1                                  
 35  * 010....0 -> 2                                  
 36  * 000....1 -> WSBITS                             
 37  */                                               
 38                                                   
 39         .macro ffs_ws bit mask                    
 40                                                   
 41 #if XCHAL_HAVE_NSA                                
 42         nsau    \bit, \mask                       
 43         addi    \bit, \bit, WSBITS - 32 + 1       
 44 #else                                             
 45         movi    \bit, WSBITS                      
 46 #if WSBITS > 16                                   
 47         _bltui  \mask, 0x10000, 99f               
 48         addi    \bit, \bit, -16                   
 49         extui   \mask, \mask, 16, 16              
 50 #endif                                            
 51 #if WSBITS > 8                                    
 52 99:     _bltui  \mask, 0x100, 99f                 
 53         addi    \bit, \bit, -8                    
 54         srli    \mask, \mask, 8                   
 55 #endif                                            
 56 99:     _bltui  \mask, 0x10, 99f                  
 57         addi    \bit, \bit, -4                    
 58         srli    \mask, \mask, 4                   
 59 99:     _bltui  \mask, 0x4, 99f                   
 60         addi    \bit, \bit, -2                    
 61         srli    \mask, \mask, 2                   
 62 99:     _bltui  \mask, 0x2, 99f                   
 63         addi    \bit, \bit, -1                    
 64 99:                                               
 65                                                   
 66 #endif                                            
 67         .endm                                     
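            /* For example, with WSBITS = 8 and \mask = 0x10 (00010000, the
             * fourth bit from the left), the XCHAL_HAVE_NSA path computes
             * nsau = 27 leading zeros and then 27 + WSBITS - 32 + 1 = 4,
             * matching the table above; the non-NSA binary search arrives
             * at the same value.
             */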
 68                                                   
 69                                                   
 70         .macro  irq_save flags tmp                
 71 #if XTENSA_FAKE_NMI                               
 72 #if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
 73         rsr     \flags, ps
 74         extui   \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
 75         bgei    \tmp, LOCKLEVEL, 99f              
 76         rsil    \tmp, LOCKLEVEL                   
 77 99:                                               
 78 #else                                             
 79         movi    \tmp, LOCKLEVEL                   
 80         rsr     \flags, ps                        
 81         or      \flags, \flags, \tmp              
 82         xsr     \flags, ps                        
 83         rsync                                     
 84 #endif                                            
 85 #else                                             
 86         rsil    \flags, LOCKLEVEL                 
 87 #endif                                            
 88         .endm                                     
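            /* Typical use below: 'irq_save abi_tmp0, abi_tmp1' raises
             * PS.INTLEVEL to at least LOCKLEVEL and leaves the previous PS
             * value in the first operand. Interrupt levels above LOCKLEVEL
             * stay enabled, which is what allows the fake NMI
             * (XTENSA_FAKE_NMI) to still be delivered.
             */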
 89                                                   
 90 /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
 91                                                   
 92 /*                                                
 93  * First-level exception handler for user exceptions.
 94  * Save some special registers, extra states and all registers in the AR
 95  * register file that were in use in the user task, and jump to the common
 96  * exception code.
 97  * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 98  * save them for kernel exceptions).
 99  *                                                
100  * Entry condition for user_exception:
101  *
102  *   a0:        trashed, original value saved on stack (PT_AREG0)
103  *   a1:        a1
104  *   a2:        new stack pointer, original value in depc
105  *   a3:        a3
106  *   depc:      a2, original value saved on stack (PT_DEPC)
107  *   excsave1:  dispatch table
108  *
109  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
110  *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
111  *
112  * Entry condition for _user_exception:
113  *
114  *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
115  *   excsave has been restored, and
116  *   stack pointer (a1) has been set.
117  *
118  * Note: _user_exception might be at an odd address. Don't use call0..call12
119  */                                               
120         .literal_position                         
121                                                   
122 ENTRY(user_exception)                             
123                                                   
124         /* Save a1, a2, a3, and set SP. */        
125                                                   
126         rsr     a0, depc                          
127         s32i    a1, a2, PT_AREG1                  
128         s32i    a0, a2, PT_AREG2                  
129         s32i    a3, a2, PT_AREG3                  
130         mov     a1, a2                            
131                                                   
132         .globl _user_exception                    
133 _user_exception:                                  
134                                                   
135         /* Save SAR and turn off single stepping */
136                                                   
137         movi    a2, 0                             
138         wsr     a2, depc                # terminate user stack trace with 0
139         rsr     a3, sar                           
140         xsr     a2, icountlevel                   
141         s32i    a3, a1, PT_SAR                    
142         s32i    a2, a1, PT_ICOUNTLEVEL            
143                                                   
144 #if XCHAL_HAVE_THREADPTR                          
145         rur     a2, threadptr                     
146         s32i    a2, a1, PT_THREADPTR              
147 #endif                                            
148                                                   
149         /* Rotate ws so that the current windowbase is at bit0. */
150         /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
151                                                   
152 #if defined(USER_SUPPORT_WINDOWED)                
153         rsr     a2, windowbase                    
154         rsr     a3, windowstart                   
155         ssr     a2                                
156         s32i    a2, a1, PT_WINDOWBASE             
157         s32i    a3, a1, PT_WINDOWSTART            
158         slli    a2, a3, 32-WSBITS                 
159         src     a2, a3, a2                        
160         srli    a2, a2, 32-WSBITS                 
161         s32i    a2, a1, PT_WMASK        # needed for restoring registers
162 #else                                             
163         movi    a2, 0                             
164         movi    a3, 1                             
165         s32i    a2, a1, PT_WINDOWBASE             
166         s32i    a3, a1, PT_WINDOWSTART            
167         s32i    a3, a1, PT_WMASK                  
168 #endif                                            
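            /* Example of the rotation above: with WSBITS = 8, WINDOWBASE = 3
             * and WINDOWSTART = 00101000 (bits 3 and 5 set), rotating right
             * by WINDOWBASE gives a2 = 00000101: bit 0 is the current frame,
             * bit 2 a live frame starting two windows above it.
             */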
169                                                   
170         /* Save only live registers. */           
171                                                   
172 UABI_W  _bbsi.l a2, 1, .Lsave_window_registers    
173         s32i    a4, a1, PT_AREG4                  
174         s32i    a5, a1, PT_AREG5                  
175         s32i    a6, a1, PT_AREG6                  
176         s32i    a7, a1, PT_AREG7                  
177 UABI_W  _bbsi.l a2, 2, .Lsave_window_registers    
178         s32i    a8, a1, PT_AREG8                  
179         s32i    a9, a1, PT_AREG9                  
180         s32i    a10, a1, PT_AREG10                
181         s32i    a11, a1, PT_AREG11                
182 UABI_W  _bbsi.l a2, 3, .Lsave_window_registers    
183         s32i    a12, a1, PT_AREG12                
184         s32i    a13, a1, PT_AREG13                
185         s32i    a14, a1, PT_AREG14                
186         s32i    a15, a1, PT_AREG15                
187                                                   
188 #if defined(USER_SUPPORT_WINDOWED)                
189         /* If only one valid frame skip saving regs. */
190                                                   
191         beqi    a2, 1, common_exception           
192                                                   
193         /* Save the remaining registers.          
194          * We have to save all registers up to the first '1' from
195          * the right, except the current frame (bit 0).
196          * Assume a2 is:  001001000110001         
197          * All register frames starting from t    
198          * must be saved.                         
199          */                                       
200 .Lsave_window_registers:                          
201         addi    a3, a2, -1              # eliminate '1' in bit 0: yyyyxxww0
202         neg     a3, a3                  # yyyyxxww0 -> YYYYXXWW1+1
203         and     a3, a3, a2              # max. only one bit is set
204                                                   
205         /* Find number of frames to save */       
206                                                   
207         ffs_ws  a0, a3                  # number of frames to the '1' from left
208                                                   
209         /* Store information into WMASK:          
210          * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
211          * bits 4...: number of valid 4-register frames
212          */                                       
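            /* For instance, a lower-nibble mask of 0001 with five 4-register
             * frames to save gives WMASK = (5 << 4) | 0x1 = 0x51.
             */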
213                                                   
214         slli    a3, a0, 4               # number of frames to save in bits 8..4
215         extui   a2, a2, 0, 4            # mask for the first 16 registers
216         or      a2, a3, a2
217         s32i    a2, a1, PT_WMASK        # needed when we restore the reg-file
218                                                   
219         /* Save 4 registers at a time */          
220                                                   
221 1:      rotw    -1                                
222         s32i    a0, a5, PT_AREG_END - 16          
223         s32i    a1, a5, PT_AREG_END - 12          
224         s32i    a2, a5, PT_AREG_END - 8           
225         s32i    a3, a5, PT_AREG_END - 4           
226         addi    a0, a4, -1                        
227         addi    a1, a5, -16                       
228         _bnez   a0, 1b                            
229                                                   
230         /* WINDOWBASE still in SAR! */            
231                                                   
232         rsr     a2, sar                 # original WINDOWBASE
233         movi    a3, 1
234         ssl     a2
235         sll     a3, a3
236         wsr     a3, windowstart         # set corresponding WINDOWSTART bit
237         wsr     a2, windowbase          # and WINDOWBASE
238         rsync                                     
239                                                   
240         /* We are back to the original stack pointer (a1) */
241 #endif                                            
242         /* Now, jump to the common exception handler. */
243                                                   
244         j       common_exception                  
245                                                   
246 ENDPROC(user_exception)                           
247                                                   
248 /*                                                
249  * First-level exit handler for kernel exceptions
250  * Save special registers and the live window frame.
251  * Note: Even though we change the stack pointer, we don't have to do a
252  *       MOVSP here, as we do that when we return from the exception.
253  *       (See comment in the kernel exception exit code)
254  *
255  * Entry condition for kernel_exception:
256  *
257  *   a0:        trashed, original value saved on stack (PT_AREG0)
258  *   a1:        a1
259  *   a2:        new stack pointer, original in DEPC
260  *   a3:        a3
261  *   depc:      a2, original value saved on stack (PT_DEPC)
262  *   excsave_1: dispatch table
263  *
264  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
265  *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
266  *
267  * Entry condition for _kernel_exception:
268  *
269  *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
270  *   excsave has been restored, and
271  *   stack pointer (a1) has been set.
272  *
273  * Note: _kernel_exception might be at an odd address. Don't use call0..call12
274  */                                               
275                                                   
276 ENTRY(kernel_exception)                           
277                                                   
278         /* Save a1, a2, a3, and set SP. */        
279                                                   
280         rsr     a0, depc                # get a2
281         s32i    a1, a2, PT_AREG1                  
282         s32i    a0, a2, PT_AREG2                  
283         s32i    a3, a2, PT_AREG3                  
284         mov     a1, a2                            
285                                                   
286         .globl _kernel_exception                  
287 _kernel_exception:                                
288                                                   
289         /* Save SAR and turn off single stepping */
290                                                   
291         movi    a2, 0                             
292         rsr     a3, sar                           
293         xsr     a2, icountlevel                   
294         s32i    a3, a1, PT_SAR                    
295         s32i    a2, a1, PT_ICOUNTLEVEL            
296                                                   
297 #if defined(__XTENSA_WINDOWED_ABI__)              
298         /* Rotate ws so that the current windowbase is at bit0. */
299         /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
300                                                   
301         rsr     a2, windowbase          # don't need to save these, we only
302         rsr     a3, windowstart         # need shifted windowstart: windowmask
303         ssr     a2                                
304         slli    a2, a3, 32-WSBITS                 
305         src     a2, a3, a2                        
306         srli    a2, a2, 32-WSBITS                 
307         s32i    a2, a1, PT_WMASK        # needed for kernel_exception_exit
308 #endif                                            
309                                                   
310         /* Save only the live window-frame */     
311                                                   
312 KABI_W  _bbsi.l a2, 1, 1f                         
313         s32i    a4, a1, PT_AREG4                  
314         s32i    a5, a1, PT_AREG5                  
315         s32i    a6, a1, PT_AREG6                  
316         s32i    a7, a1, PT_AREG7                  
317 KABI_W  _bbsi.l a2, 2, 1f                         
318         s32i    a8, a1, PT_AREG8                  
319         s32i    a9, a1, PT_AREG9                  
320         s32i    a10, a1, PT_AREG10                
321         s32i    a11, a1, PT_AREG11                
322 KABI_W  _bbsi.l a2, 3, 1f                         
323         s32i    a12, a1, PT_AREG12                
324         s32i    a13, a1, PT_AREG13                
325         s32i    a14, a1, PT_AREG14                
326         s32i    a15, a1, PT_AREG15                
327                                                   
328 #ifdef __XTENSA_WINDOWED_ABI__                    
329         _bnei   a2, 1, 1f                         
330         /* Copy spill slots of a0 and a1 to imitate movsp
331          * in order to keep exception stack continuous
332          */                                       
333         l32i    a3, a1, PT_KERNEL_SIZE            
334         l32i    a0, a1, PT_KERNEL_SIZE + 4        
335         s32e    a3, a1, -16                       
336         s32e    a0, a1, -12                       
337 #endif                                            
338 1:                                                
339         l32i    a0, a1, PT_AREG0        # restore saved a0
340         wsr     a0, depc                          
341                                                   
342 /*                                                
343  * This is the common exception handler.          
344  * We get here from the user exception handler or simply by jumping here
345  * from the kernel exception handler.
346  * Save the remaining special registers, switch to kernel mode, and jump
347  * to the second-level exception handler.         
348  *                                                
349  */                                               
350                                                   
351 common_exception:                                 
352                                                   
353         /* Save some registers, disable loops and clear the syscall flag. */
354                                                   
355         rsr     a2, debugcause                    
356         rsr     a3, epc1                          
357         s32i    a2, a1, PT_DEBUGCAUSE             
358         s32i    a3, a1, PT_PC                     
359                                                   
360         movi    a2, NO_SYSCALL                    
361         rsr     a3, excvaddr                      
362         s32i    a2, a1, PT_SYSCALL                
363         movi    a2, 0                             
364         s32i    a3, a1, PT_EXCVADDR               
365 #if XCHAL_HAVE_LOOPS                              
366         xsr     a2, lcount                        
367         s32i    a2, a1, PT_LCOUNT                 
368 #endif                                            
369                                                   
370 #if XCHAL_HAVE_EXCLUSIVE                          
371         /* Clear exclusive access monitor set by interrupted code */
372         clrex                                     
373 #endif                                            
374                                                   
375         /* It is now safe to restore the EXC_TABLE_FIXUP variable. */
376                                                   
377         rsr     a2, exccause                      
378         movi    a3, 0                             
379         rsr     a0, excsave1                      
380         s32i    a2, a1, PT_EXCCAUSE               
381         s32i    a3, a0, EXC_TABLE_FIXUP           
382                                                   
383         /* All unrecoverable states are saved on stack, now, and a1 is valid.
384          * Now we can allow exceptions again. In case we've got an interrupt
385          * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
386          * otherwise it's left unchanged.
387          *
388          * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
389          */                                       
390                                                   
391         rsr     a3, ps                            
392         s32i    a3, a1, PT_PS           # save ps
393                                                   
394 #if XTENSA_FAKE_NMI                               
395         /* Correct PS needs to be saved in the PT_PS:
396          * - in case of exception or level-1 interrupt it's in the PS,
397          *   and is already saved.
398          * - in case of medium level interrupt it's in the excsave2.
399          */                                       
400         movi    a0, EXCCAUSE_MAPPED_NMI           
401         extui   a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
402         beq     a2, a0, .Lmedium_level_irq        
403         bnei    a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
404         beqz    a3, .Llevel1_irq        # level-1 irq sets ps.intlevel to 0
405                                                   
406 .Lmedium_level_irq:                               
407         rsr     a0, excsave2                      
408         s32i    a0, a1, PT_PS           # save medium-level interrupt ps
409         bgei    a3, LOCKLEVEL, .Lexception        
410                                                   
411 .Llevel1_irq:                                     
412         movi    a3, LOCKLEVEL                     
413                                                   
414 .Lexception:                                      
415 KABI_W  movi    a0, PS_WOE_MASK                   
416 KABI_W  or      a3, a3, a0                        
417 #else                                             
418         addi    a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
419         movi    a0, LOCKLEVEL
420         extui   a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
421                                         # a3 = PS.INTLEVEL
422         moveqz  a3, a0, a2              # a3 = LOCKLEVEL iff interrupt
423 KABI_W  movi    a2, PS_WOE_MASK                   
424 KABI_W  or      a3, a3, a2                        
425 #endif                                            
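            /* In the non-fake-NMI path above, a2 is zero exactly when
             * EXCCAUSE == EXCCAUSE_LEVEL1_INTERRUPT, so 'moveqz a3, a0, a2'
             * selects LOCKLEVEL for level-1 interrupts and keeps the saved
             * PS.INTLEVEL for every other exception cause.
             */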
426                                                   
427         /* restore return address (or 0 if return to userspace) */
428         rsr     a0, depc
429         wsr     a3, ps
430         rsync                           # PS.WOE => rsync => overflow
431                                                   
432         /* Save lbeg, lend */                     
433 #if XCHAL_HAVE_LOOPS                              
434         rsr     a4, lbeg                          
435         rsr     a3, lend                          
436         s32i    a4, a1, PT_LBEG                   
437         s32i    a3, a1, PT_LEND                   
438 #endif                                            
439                                                   
440         /* Save SCOMPARE1 */                      
441                                                   
442 #if XCHAL_HAVE_S32C1I                             
443         rsr     a3, scompare1                     
444         s32i    a3, a1, PT_SCOMPARE1              
445 #endif                                            
446                                                   
447         /* Save optional registers. */            
448                                                   
449         save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
450                                                   
451 #ifdef CONFIG_TRACE_IRQFLAGS                      
452         rsr             abi_tmp0, ps              
453         extui           abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
454         beqz            abi_tmp0, 1f              
455         abi_call        trace_hardirqs_off        
456 1:                                                
457 #endif                                            
458 #ifdef CONFIG_CONTEXT_TRACKING_USER               
459         l32i            abi_tmp0, a1, PT_PS       
460         bbci.l          abi_tmp0, PS_UM_BIT, 1f
461         abi_call        user_exit_callable        
462 1:                                                
463 #endif                                            
464                                                   
465         /* Go to second-level dispatcher. Set up parameters to pass to the
466          * exception handler and call the exception handler.
467          */                                       
468                                                   
469         l32i            abi_arg1, a1, PT_EXCCAUSE
470         rsr             abi_tmp0, excsave1
471         addx4           abi_tmp0, abi_arg1, abi_tmp0
472         l32i            abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT
473         mov             abi_arg0, a1              
474                                                   
475         /* Call the second-level handler */       
476                                                   
477         abi_callx       abi_tmp0                  
478                                                   
479         /* Jump here for exception exit */        
480         .global common_exception_return           
481 common_exception_return:                          
482                                                   
483 #if XTENSA_FAKE_NMI                               
484         l32i            abi_tmp0, a1, PT_EXCCAUSE
485         movi            abi_tmp1, EXCCAUSE_MAPPED_NMI
486         l32i            abi_saved1, a1, PT_PS
487         beq             abi_tmp0, abi_tmp1, .Lrestore_state
488 #endif                                            
489 .Ltif_loop:                                       
490         irq_save        abi_tmp0, abi_tmp1        
491 #ifdef CONFIG_TRACE_IRQFLAGS                      
492         abi_call        trace_hardirqs_off        
493 #endif                                            
494                                                   
495         /* Jump if we are returning from kernel exceptions. */
496                                                   
497         l32i            abi_saved1, a1, PT_PS     
498         GET_THREAD_INFO(abi_tmp0, a1)             
499         l32i            abi_saved0, abi_tmp0, TI_FLAGS
500         _bbci.l         abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel
501                                                   
502         /* Specific to a user exception exit:     
503          * We need to check some flags for signal handling and rescheduling,
504          * and have to restore WB and WS, extra states, and all registers
505          * in the register file that were in use in the user task.
506          * Note that we don't disable interrupts here.
507          */                                       
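            /* The flag checks below run in a loop (.Ltif_loop): after
             * do_notify_resume or schedule returns, interrupts are masked
             * again by irq_save at the top of the loop and TI_FLAGS is
             * re-read, so work raised in the meantime is not missed on the
             * way back to user space.
             */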
508                                                   
509         _bbsi.l         abi_saved0, TIF_NEED_RESCHED, .Lresched
510         movi            abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
511         bnone           abi_saved0, abi_tmp0, .Lexit_tif_loop_user
512                                                   
513         l32i            abi_tmp0, a1, PT_DEPC     
514         bgeui           abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
515                                                   
516         /* Call do_signal() */                    
517                                                   
518 #ifdef CONFIG_TRACE_IRQFLAGS                      
519         abi_call        trace_hardirqs_on         
520 #endif                                            
521         rsil            abi_tmp0, 0               
522         mov             abi_arg0, a1              
523         abi_call        do_notify_resume          
524         j               .Ltif_loop                
525                                                   
526 .Lresched:                                        
527 #ifdef CONFIG_TRACE_IRQFLAGS                      
528         abi_call        trace_hardirqs_on         
529 #endif                                            
530         rsil            abi_tmp0, 0               
531         abi_call        schedule        # void schedule (void)
532         j               .Ltif_loop                
533                                                   
534 .Lexit_tif_loop_kernel:                           
535 #ifdef CONFIG_PREEMPTION                          
536         _bbci.l         abi_saved0, TIF_NEED_RESCHED, .Lrestore_state
537                                                   
538         /* Check current_thread_info->preempt_count */
539                                                   
540         l32i            abi_tmp1, abi_tmp0, TI_PRE_COUNT
541         bnez            abi_tmp1, .Lrestore_state
542         abi_call        preempt_schedule_irq      
543 #endif                                            
544         j               .Lrestore_state           
545                                                   
546 .Lexit_tif_loop_user:                             
547 #ifdef CONFIG_CONTEXT_TRACKING_USER               
548         abi_call        user_enter_callable       
549 #endif                                            
550 #ifdef CONFIG_HAVE_HW_BREAKPOINT                  
551         _bbci.l         abi_saved0, TIF_DB_DISABLED, 1f
552         abi_call        restore_dbreak            
553 1:                                                
554 #endif                                            
555 #ifdef CONFIG_DEBUG_TLB_SANITY                    
556         l32i            abi_tmp0, a1, PT_DEPC     
557         bgeui           abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
558         abi_call        check_tlb_sanity          
559 #endif                                            
560                                                   
561 .Lrestore_state:                                  
562 #ifdef CONFIG_TRACE_IRQFLAGS                      
563         extui           abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
564         bgei            abi_tmp0, LOCKLEVEL, 1f
565         abi_call        trace_hardirqs_on         
566 1:                                                
567 #endif                                            
568         /*                                        
569          * Restore optional registers.            
570          * abi_arg* are used as temporary registers here.
571          */                                       
572                                                   
573         load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT
574                                                   
575         /* Restore SCOMPARE1 */                   
576                                                   
577 #if XCHAL_HAVE_S32C1I                             
578         l32i            abi_tmp0, a1, PT_SCOMPARE1
579         wsr             abi_tmp0, scompare1       
580 #endif                                            
581         wsr             abi_saved1, ps            
582         _bbci.l         abi_saved1, PS_UM_BIT, kernel_exception_exit
583                                                   
584 user_exception_exit:                              
585                                                   
586         /* Restore the state of the task and return from the exception. */
587                                                   
588 #if defined(USER_SUPPORT_WINDOWED)                
589         /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
590                                                   
591         l32i    a2, a1, PT_WINDOWBASE             
592         l32i    a3, a1, PT_WINDOWSTART            
593         wsr     a1, depc                # use DEPC as temp storage
594         wsr     a3, windowstart         # restore WINDOWSTART
595         ssr     a2                      # preserve user's WB in the SAR
596         wsr     a2, windowbase          # switch to user's saved WB
597         rsync
598         rsr     a1, depc                # restore stack pointer
599         l32i    a2, a1, PT_WMASK        # register frames saved (in bits 4...9)
600         rotw    -1                      # we restore a4..a7
601         _bltui  a6, 16, .Lclear_regs    # only have to restore current window?
602                                                   
603         /* The working registers are a0 and a3.  We are restoring to
604          * a4..a7.  Be careful not to destroy what we have just restored.
605          * Note: wmask has the format YYYYM:
606          *       Y: number of registers saved in groups of 4
607          *       M: 4 bit mask of first 16 registers
608          */                                       
609                                                   
610         mov     a2, a6                            
611         mov     a3, a5                            
612                                                   
613 1:      rotw    -1                      # a0..a3 become a4..a7
614         addi    a3, a7, -4*4            # next iteration
615         addi    a2, a6, -16             # decrementing Y in WMASK
616         l32i    a4, a3, PT_AREG_END + 0           
617         l32i    a5, a3, PT_AREG_END + 4           
618         l32i    a6, a3, PT_AREG_END + 8           
619         l32i    a7, a3, PT_AREG_END + 12          
620         _bgeui  a2, 16, 1b                        
621                                                   
622         /* Clear unrestored registers (don't leak anything to user-space) */
623                                                   
624 .Lclear_regs:                                     
625         rsr     a0, windowbase                    
626         rsr     a3, sar                           
627         sub     a3, a0, a3                        
628         beqz    a3, 2f                            
629         extui   a3, a3, 0, WBBITS                 
630                                                   
631 1:      rotw    -1                                
632         addi    a3, a7, -1                        
633         movi    a4, 0                             
634         movi    a5, 0                             
635         movi    a6, 0                             
636         movi    a7, 0                             
637         bgei    a3, 1, 1b                         
638                                                   
639         /* We are back where we were when we started.
640          * Note: a2 still contains WMASK (if we've returned to the original
641          *       frame where we had loaded a2), or at least the lower 4 bits
642          *       (if we have restored WSBITS-1 frames).
643          */                                       
644 2:                                                
645 #else                                             
646         movi    a2, 1                             
647 #endif                                            
648 #if XCHAL_HAVE_THREADPTR                          
649         l32i    a3, a1, PT_THREADPTR              
650         wur     a3, threadptr                     
651 #endif                                            
652                                                   
653         j       common_exception_exit             
654                                                   
655         /* This is the kernel exception exit.     
656          * We avoided doing a MOVSP when we entered the exception, but we
657          * have to do it here.                    
658          */                                       
659                                                   
660 kernel_exception_exit:                            
661                                                   
662 #if defined(__XTENSA_WINDOWED_ABI__)              
663         /* Check if we have to do a movsp.        
664          *                                        
665          * We only have to do a movsp if the previous window-frame has
666          * been spilled to the *temporary* exception stack instead of the
667          * task's stack. This is the case if the corresponding bit in
668          * WINDOWSTART for the previous window-frame was set before
669          * (not spilled) but is zero now (spilled).
670          * If this bit is zero, all other bits except the one for the
671          * current window frame are also zero. So, we can use a simple test:
672          * 'and' WINDOWSTART and WINDOWSTART-1:
673          *
674          *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
675          *
676          * The result is zero only if one bit was set.
677          *
678          * (Note: We might have gone through several task switches before
679          *        we come back to the current task, so WINDOWBASE might be
680          *        different from the time the exception occurred.)
681          */                                       
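            /* Numeric example: WINDOWSTART = 001000 (only the current frame
             * left) gives 001000 & 000111 = 0, so the caller's frame was
             * spilled and the movsp copy below is needed; WINDOWSTART =
             * 001100 gives 001100 & 001011 = 001000 != 0, so no movsp is
             * required.
             */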
682                                                   
683         /* Test WINDOWSTART before and after the exception.
684          * We actually have WMASK, so we only have to test if it is 1 or not.
685          */                                       
686                                                   
687         l32i    a2, a1, PT_WMASK                  
688         _beqi   a2, 1, common_exception_exit      
689                                                   
690         /* Test WINDOWSTART now. If spilled, do the movsp */
691                                                   
692         rsr     a3, windowstart                   
693         addi    a0, a3, -1                        
694         and     a3, a3, a0                        
695         _bnez   a3, common_exception_exit         
696                                                   
697         /* Do a movsp (we returned from a call    
698                                                   
699         addi    a0, a1, -16                       
700         l32i    a3, a0, 0                         
701         l32i    a4, a0, 4                         
702         s32i    a3, a1, PT_KERNEL_SIZE + 0        
703         s32i    a4, a1, PT_KERNEL_SIZE + 4        
704         l32i    a3, a0, 8                         
705         l32i    a4, a0, 12                        
706         s32i    a3, a1, PT_KERNEL_SIZE + 8        
707         s32i    a4, a1, PT_KERNEL_SIZE + 12       
708                                                   
709         /* Common exception exit.                 
710          * We restore the special register and the current window frame, and
711          * return from the exception.             
712          *                                        
713          * Note: We expect a2 to hold PT_WMASK    
714          */                                       
715 #else                                             
716         movi    a2, 1                             
717 #endif                                            
718                                                   
719 common_exception_exit:                            
720                                                   
721         /* Restore address registers. */          
722                                                   
723         _bbsi.l a2, 1, 1f                         
724         l32i    a4,  a1, PT_AREG4                 
725         l32i    a5,  a1, PT_AREG5                 
726         l32i    a6,  a1, PT_AREG6                 
727         l32i    a7,  a1, PT_AREG7                 
728         _bbsi.l a2, 2, 1f                         
729         l32i    a8,  a1, PT_AREG8                 
730         l32i    a9,  a1, PT_AREG9                 
731         l32i    a10, a1, PT_AREG10                
732         l32i    a11, a1, PT_AREG11                
733         _bbsi.l a2, 3, 1f                         
734         l32i    a12, a1, PT_AREG12                
735         l32i    a13, a1, PT_AREG13                
736         l32i    a14, a1, PT_AREG14                
737         l32i    a15, a1, PT_AREG15                
738                                                   
739         /* Restore PC, SAR */                     
740                                                   
741 1:      l32i    a2, a1, PT_PC                     
742         l32i    a3, a1, PT_SAR                    
743         wsr     a2, epc1                          
744         wsr     a3, sar                           
745                                                   
746         /* Restore LBEG, LEND, LCOUNT */          
747 #if XCHAL_HAVE_LOOPS                              
748         l32i    a2, a1, PT_LBEG                   
749         l32i    a3, a1, PT_LEND                   
750         wsr     a2, lbeg                          
751         l32i    a2, a1, PT_LCOUNT                 
752         wsr     a3, lend                          
753         wsr     a2, lcount                        
754 #endif                                            
755                                                   
756         /* We control single stepping through the icountlevel register. */
757                                                   
758         l32i    a2, a1, PT_ICOUNTLEVEL            
759         movi    a3, -2                            
760         wsr     a2, icountlevel                   
761         wsr     a3, icount                        
762                                                   
763         /* Check if it was double exception. */
764                                                   
765         l32i    a0, a1, PT_DEPC                   
766         l32i    a3, a1, PT_AREG3                  
767         l32i    a2, a1, PT_AREG2                  
768         _bgeui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
769                                                   
770         /* Restore a0...a3 and return */          
771                                                   
772         l32i    a0, a1, PT_AREG0                  
773         l32i    a1, a1, PT_AREG1                  
774         rfe                                       
775                                                   
776 1:      wsr     a0, depc                          
777         l32i    a0, a1, PT_AREG0                  
778         l32i    a1, a1, PT_AREG1                  
779         rfde                                      
780                                                   
781 ENDPROC(kernel_exception)                         
782                                                   
783 /*                                                
784  * Debug exception handler.                       
785  *                                                
786  * Currently, we don't support KGDB, so only user application can be debugged.
787  *
788  * When we get here,  a0 is trashed and saved to excsave[debuglevel]
789  */                                               
790                                                   
791         .literal_position                         
792                                                   
793 ENTRY(debug_exception)                            
794                                                   
795         rsr     a0, SREG_EPS + XCHAL_DEBUGLEVEL
796         bbsi.l  a0, PS_EXCM_BIT, .Ldebug_exception_in_exception
797                                                   
798         /* Set EPC1 and EXCCAUSE */               
799                                                   
800         wsr     a2, depc                # save a2 temporarily
801         rsr     a2, SREG_EPC + XCHAL_DEBUGLEVEL
802         wsr     a2, epc1                          
803                                                   
804         movi    a2, EXCCAUSE_MAPPED_DEBUG         
805         wsr     a2, exccause                      
806                                                   
807         /* Restore PS to the value before the debug exception, but with PS.EXCM set. */
808                                                   
809         movi    a2, 1 << PS_EXCM_BIT              
810         or      a2, a0, a2                        
811         wsr     a2, ps                            
812                                                   
813         /* Switch to kernel/user stack, restore jump vector, and save a0 */
814                                                   
815         bbsi.l  a2, PS_UM_BIT, .Ldebug_exception_user
816         addi    a2, a1, -16 - PT_KERNEL_SIZE      
817                                                   
818 .Ldebug_exception_continue:                       
819         l32i    a0, a3, DT_DEBUG_SAVE             
820         s32i    a1, a2, PT_AREG1                  
821         s32i    a0, a2, PT_AREG0                  
822         movi    a0, 0                             
823         s32i    a0, a2, PT_DEPC         # mark it as a regular exception
824         xsr     a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
825         xsr     a0, depc                          
826         s32i    a3, a2, PT_AREG3                  
827         s32i    a0, a2, PT_AREG2                  
828         mov     a1, a2                            
829                                                   
830         /* Debug exception is handled as an exception, so interrupts will
831          * likely be enabled in the common exception handler. Disable
832          * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
833          * meaning.                               
834          */                                       
835 #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
836         GET_THREAD_INFO(a2, a1)                   
837         l32i    a3, a2, TI_PRE_COUNT              
838         addi    a3, a3, 1                         
839         s32i    a3, a2, TI_PRE_COUNT              
840 #endif                                            
841                                                   
842         rsr     a2, ps                            
843         bbsi.l  a2, PS_UM_BIT, _user_exception    
844         j       _kernel_exception                 
845                                                   
846 .Ldebug_exception_user:                           
847         rsr     a2, excsave1                      
848         l32i    a2, a2, EXC_TABLE_KSTK  # load kernel stack pointer
849         j       .Ldebug_exception_continue        
850                                                   
851 .Ldebug_exception_in_exception:                   
852 #ifdef CONFIG_HAVE_HW_BREAKPOINT                  
853         /* Debug exception while in exception mode. This may happen when
854          * window overflow/underflow handler or fast exception handler hits
855          * data breakpoint, in which case save and disable all data
856          * breakpoints, single-step faulting instruction and restore data
857          * breakpoints.                           
858          */                                       
859                                                   
860         bbci.l  a0, PS_UM_BIT, .Ldebug_exception_in_exception
861                                                   
862         rsr     a0, debugcause                    
863         bbsi.l  a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak
864                                                   
865         .set    _index, 0                         
866         .rept   XCHAL_NUM_DBREAK                  
867         l32i    a0, a3, DT_DBREAKC_SAVE + _index * 4
868         wsr     a0, SREG_DBREAKC + _index         
869         .set    _index, _index + 1                
870         .endr                                     
871                                                   
872         l32i    a0, a3, DT_ICOUNT_LEVEL_SAVE      
873         wsr     a0, icountlevel                   
874                                                   
875         l32i    a0, a3, DT_ICOUNT_SAVE            
876         xsr     a0, icount                        
877                                                   
878         l32i    a0, a3, DT_DEBUG_SAVE             
879         xsr     a3, SREG_EXCSAVE + XCHAL_DEBUG    
880         rfi     XCHAL_DEBUGLEVEL                  
881                                                   
882 .Ldebug_save_dbreak:                              
883         .set    _index, 0                         
884         .rept   XCHAL_NUM_DBREAK                  
885         movi    a0, 0                             
886         xsr     a0, SREG_DBREAKC + _index         
887         s32i    a0, a3, DT_DBREAKC_SAVE + _index * 4
888         .set    _index, _index + 1                
889         .endr                                     
890                                                   
891         movi    a0, XCHAL_EXCM_LEVEL + 1          
892         xsr     a0, icountlevel                   
893         s32i    a0, a3, DT_ICOUNT_LEVEL_SAVE      
894                                                   
895         movi    a0, 0xfffffffe                    
896         xsr     a0, icount                        
897         s32i    a0, a3, DT_ICOUNT_SAVE            
898                                                   
899         l32i    a0, a3, DT_DEBUG_SAVE             
900         xsr     a3, SREG_EXCSAVE + XCHAL_DEBUG    
901         rfi     XCHAL_DEBUGLEVEL                  
902 #else                                             
903         /* Debug exception while in exception mode. Should not happen. */
904         j       .Ldebug_exception_in_exception    
905 #endif                                            
906                                                   
907 ENDPROC(debug_exception)                          
908                                                   
909 /*                                                
910  * We get here in case of an unrecoverable exception.
911  * The only thing we can do is to be nice and print a panic message.
912  * We only produce a single stack frame for panic, so ???
913  *                                                
914  *                                                
915  * Entry conditions:                              
916  *                                                
917  *   - a0 contains the caller address; original value saved on stack.
918  *   - the original a0 contains a valid return address (backtrace) or 0.
919  *   - a2 contains a valid stackpointer           
920  *                                                
921  * Notes:                                         
922  *                                                
923  *   - If the stack pointer could be invalid, the caller has to setup a
924  *     dummy stack pointer (e.g. the stack of the init_task)
925  *
926  *   - If the return address could be invalid, the caller has to set it
927  *     to 0, so the backtrace would stop.         
928  *                                                
929  */                                               
930         .align 4                                  
931 unrecoverable_text:                               
932         .ascii "Unrecoverable error in exception handler\0"
933                                                   
934         .literal_position                         
935                                                   
936 ENTRY(unrecoverable_exception)                    
937                                                   
938 #if XCHAL_HAVE_WINDOWED                           
939         movi    a0, 1                             
940         movi    a1, 0                             
941                                                   
942         wsr     a0, windowstart                   
943         wsr     a1, windowbase                    
944         rsync                                     
945 #endif                                            
946                                                   
947         movi    a1, KERNEL_PS_WOE_MASK | LOCKLEVEL
948         wsr     a1, ps                            
949         rsync                                     
950                                                   
951         movi    a1, init_task                     
952         movi    a0, 0                             
953         addi    a1, a1, PT_REGS_OFFSET            
954                                                   
955         movi    abi_arg0, unrecoverable_text      
956         abi_call        panic                     
957                                                   
958 1:      j       1b                                
959                                                   
960 ENDPROC(unrecoverable_exception)                  
961                                                   
 962 /* -------------------------- FAST EXCEPTION HANDLERS ----------------------------- */
963                                                   
964         __XTENSA_HANDLER                          
965         .literal_position                         
966                                                   
967 #ifdef SUPPORT_WINDOWED                           
968 /*                                                
969  * Fast-handler for alloca exceptions             
970  *                                                
 971  *  The ALLOCA handler is entered when user code executes the MOVSP
 972  *  instruction and the caller's frame is not in the register file.
973  *                                                
 974  * This algorithm was taken from the Ross Morley's RTOS Porting Layer:
975  *                                                
976  *    /home/ross/rtos/porting/XtensaRTOS-Porti    
977  *                                                
 978  * It leverages the existing window spill/fill routines and their support for
 979  * double exceptions. The 'movsp' instruction will only cause an exception if
980  * the next window needs to be loaded. In fact    
981  * replaced at some point by changing the hard    
982  * of the proper size instead.                    
983  *                                                
984  * This algorithm simply backs out the registe    
 985  * exception handler, makes it appear that we have started a window underflow
 986  * by rotating the window back and then setting the old window base (OWB) in
 987  * the 'ps' register with the rolled back window base. The 'movsp' instruction
 988  * will be re-executed and this time since the next window frame is in the
 989  * active AR registers it won't cause an exception.
990  *                                                
 991  * If the WindowUnderflow code gets a TLB miss the page will get mapped and
 992  * the partial WindowUnderflow will be handled by the double exception
993  * handler.                                       
994  *                                                
995  * Entry condition:                               
996  *                                                
 997  *   a0:        trashed, original value saved on stack (PT_AREG0)
 998  *   a1:        a1
 999  *   a2:        new stack pointer, original in DEPC
1000  *   a3:        a3
1001  *   depc:      a2, original value saved on stack (PT_DEPC)
1002  *   excsave_1: dispatch table
1003  *
1004  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1005  *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1006  */                                              
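      /*
       * In the code below, the saved a0 of the faulting frame is reloaded
       * (l32i a4, a6, PT_AREG0); its two top bits encode whether the frame
       * was created by call4, call8 or call12 and select the matching
       * _WindowUnderflow4/8/12 handler.
       */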
1007                                                  
1008 ENTRY(fast_alloca)                               
1009         rsr     a0, windowbase                   
1010         rotw    -1                               
1011         rsr     a2, ps                           
1012         extui   a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
1013         xor     a3, a3, a4                       
1014         l32i    a4, a6, PT_AREG0                 
1015         l32i    a1, a6, PT_DEPC                  
1016         rsr     a6, depc                         
1017         wsr     a1, depc                         
1018         slli    a3, a3, PS_OWB_SHIFT             
1019         xor     a2, a2, a3                       
1020         wsr     a2, ps                           
1021         rsync                                    
1022                                                  
1023         _bbci.l a4, 31, 4f                       
1024         rotw    -1                               
1025         _bbci.l a8, 30, 8f                       
1026         rotw    -1                               
1027         j       _WindowUnderflow12               
1028 8:      j       _WindowUnderflow8                
1029 4:      j       _WindowUnderflow4                
1030 ENDPROC(fast_alloca)                             
1031 #endif                                           
1032                                                  
1033 #ifdef CONFIG_USER_ABI_CALL0_PROBE               
1034 /*                                               
1035  * fast illegal instruction handler.             
1036  *                                               
1037  * This is used to fix up user PS.WOE on the exception caused
1038  * by the first opcode related to register window. If PS.WOE is
1039  * already set it goes directly to the common user exception handler.
1040  *                                               
1041  * Entry condition:                              
1042  *                                               
1043  *   a0:        trashed, original value saved on stack (PT_AREG0)
1044  *   a1:        a1
1045  *   a2:        new stack pointer, original in DEPC
1046  *   a3:        a3
1047  *   depc:      a2, original value saved on stack (PT_DEPC)
1048  *   excsave_1: dispatch table                   
1049  */                                              
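/*
 * In C terms the fix-up path below amounts to the following sketch (the
 * thread_info field corresponds to TI_PS_WOE_FIX_ADDR; this is an
 * approximation, not the literal implementation):
 *
 *	if (!(regs->ps & PS_WOE_MASK)) {
 *		regs->ps |= PS_WOE_MASK;
 *		current_thread_info()->ps_woe_fix_addr = regs->pc;
 *	}
 *
 * after which the handler restores its scratch registers and returns with
 * 'rfe', re-executing the faulting opcode with PS.WOE enabled.  If PS.WOE
 * was already set, it falls through to the common user exception path.
 */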
1050                                                  
1051 ENTRY(fast_illegal_instruction_user)             
1052                                                  
1053         rsr     a0, ps                           
1054         bbsi.l  a0, PS_WOE_BIT, 1f               
1055         s32i    a3, a2, PT_AREG3                 
1056         movi    a3, PS_WOE_MASK                  
1057         or      a0, a0, a3                       
1058         wsr     a0, ps                           
1059 #ifdef CONFIG_USER_ABI_CALL0_PROBE               
1060         GET_THREAD_INFO(a3, a2)                  
1061         rsr     a0, epc1                         
1062         s32i    a0, a3, TI_PS_WOE_FIX_ADDR       
1063 #endif                                           
1064         l32i    a3, a2, PT_AREG3                 
1065         l32i    a0, a2, PT_AREG0                 
1066         rsr     a2, depc                         
1067         rfe                                      
1068 1:                                               
1069         call0   user_exception                   
1070                                                  
1071 ENDPROC(fast_illegal_instruction_user)           
1072 #endif                                           
1073                                                  
1074         /*                                       
1075  * fast system calls.                            
1076  *                                               
1077  * WARNING:  The kernel doesn't save the entire user context before
1078  * handling a fast system call.  These functions are small and short,
1079  * usually offering some functionality not available otherwise.
1080  *                                               
1081  * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.    
1082  *                                               
1083  * Entry condition:                              
1084  *                                               
1085  *   a0:        trashed, original value saved on stack (PT_AREG0)
1086  *   a1:        a1
1087  *   a2:        new stack pointer, original in DEPC
1088  *   a3:        a3
1089  *   depc:      a2, original value saved on stack (PT_DEPC)
1090  *   excsave_1: dispatch table                   
1091  */                                              
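/*
 * Dispatch sketch for fast_syscall_user below (the syscall number lives in
 * DEPC at this point rather than in a C variable): after advancing EPC1
 * past the 3-byte syscall opcode the handler effectively does
 *
 *	if (nr == 0)
 *		goto fast_syscall_spill_registers;
 *	else if (nr == __NR_xtensa)
 *		goto fast_syscall_xtensa;
 *	else
 *		goto user_exception;
 *
 * so everything except the two fast cases takes the regular syscall path
 * through the full user exception entry.
 */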
1092                                                  
1093 ENTRY(fast_syscall_user)                         
1094                                                  
1095         /* Skip syscall. */                      
1096                                                  
1097         rsr     a0, epc1                         
1098         addi    a0, a0, 3                        
1099         wsr     a0, epc1                         
1100                                                  
1101         l32i    a0, a2, PT_DEPC                  
1102         bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
1103                                                  
1104         rsr     a0, depc                         
1105         _beqz   a0, fast_syscall_spill_registers
1106         _beqi   a0, __NR_xtensa, fast_syscall_xtensa
1107                                                  
1108         call0   user_exception                   
1109                                                  
1110 ENDPROC(fast_syscall_user)                       
1111                                                  
1112 ENTRY(fast_syscall_unrecoverable)                
1113                                                  
1114         /* Restore all states. */                
1115                                                  
1116         l32i    a0, a2, PT_AREG0        # restore a0
1117         xsr     a2, depc                # restore a2, depc
1118                                                  
1119         wsr     a0, excsave1                     
1120         call0   unrecoverable_exception          
1121                                                  
1122 ENDPROC(fast_syscall_unrecoverable)              
1123                                                  
1124 /*                                               
1125  * sysxtensa syscall handler                     
1126  *                                               
1127  * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
1128  * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
1129  * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
1130  * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
1131  *        a2            a6                   a3  a4       a5
1132  *
1133  * Entry condition:
1134  *
1135  *   a0:        a2 (syscall-nr), original value saved on stack (PT_AREG0)
1136  *   a1:        a1
1137  *   a2:        new stack pointer, original in a0 and DEPC
1138  *   a3:        a3
1139  *   a4..a15:   unchanged
1140  *   depc:      a2, original value saved on stack (PT_DEPC)
1141  *   excsave_1: dispatch table
1142  *
1143  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1144  *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1145  *
1146  * Note: we don't have to save a2; a2 holds the return value
1147  */                                              
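/*
 * User-space usage sketch (illustrative only; the wrapper is not part of
 * the kernel, and the SYS_XTENSA_* constants are assumed to be visible
 * from the architecture's uapi headers).  Going by the code below,
 * CMP_SWP returns 1 when the swap was performed and 0 when the old value
 * did not match, while SET/ADD/EXG_ADD return the previous value of *ptr;
 * a bad pointer yields -EFAULT and an unknown operation -EINVAL.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static long xtensa_cmpxchg(int *ptr, int oldval, int newval)
 *	{
 *		return syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_CMP_SWP,
 *			       ptr, oldval, newval);
 *	}
 */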
1148                                                  
1149         .literal_position                        
1150                                                  
1151 #ifdef CONFIG_FAST_SYSCALL_XTENSA                
1152                                                  
1153 ENTRY(fast_syscall_xtensa)                       
1154                                                  
1155         s32i    a7, a2, PT_AREG7        # we need an additional register
1156         movi    a7, 4                   # sizeof(unsigned int)
1157         access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp
1158                                                  
1159         _bgeui  a6, SYS_XTENSA_COUNT, .Lill      
1160         _bnei   a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp
1161                                                  
1162         /* Fall through for ATOMIC_CMP_SWP. *    
1163                                                  
1164 .Lswp:  /* Atomic compare and swap */            
1165                                                  
1166 EX(.Leac) l32i  a0, a3, 0               # read old value
1167         bne     a0, a4, 1f              # same as old value? jump
1168 EX(.Leac) s32i  a5, a3, 0               # different, modify value
1169         l32i    a7, a2, PT_AREG7        # restore a7
1170         l32i    a0, a2, PT_AREG0        # restore a0
1171         movi    a2, 1                   # and return 1
1172         rfe
1173
1174 1:      l32i    a7, a2, PT_AREG7        # restore a7
1175         l32i    a0, a2, PT_AREG0        # restore a0
1176         movi    a2, 0                   # return 0
1177         rfe                                      
1178                                                  
1179 .Lnswp: /* Atomic set, add, and exg_add. */      
1180                                                  
1181 EX(.Leac) l32i  a7, a3, 0               # orig
1182         addi    a6, a6, -SYS_XTENSA_ATOMIC_SET
1183         add     a0, a4, a7              # + arg
1184         moveqz  a0, a4, a6              # set
1185         addi    a6, a6, SYS_XTENSA_ATOMIC_SET
1186 EX(.Leac) s32i  a0, a3, 0               # write new value
1187                                                  
1188         mov     a0, a2                           
1189         mov     a2, a7                           
1190         l32i    a7, a0, PT_AREG7        # restore a7
1191         l32i    a0, a0, PT_AREG0        # restore a0
1192         rfe                                      
1193                                                  
1194 .Leac:  l32i    a7, a2, PT_AREG7        # restore a7
1195         l32i    a0, a2, PT_AREG0        # restore a0
1196         movi    a2, -EFAULT                      
1197         rfe                                      
1198                                                  
1199 .Lill:  l32i    a7, a2, PT_AREG7        # restore a7
1200         l32i    a0, a2, PT_AREG0        # restore a0
1201         movi    a2, -EINVAL                      
1202         rfe                                      
1203                                                  
1204 ENDPROC(fast_syscall_xtensa)                     
1205                                                  
1206 #else /* CONFIG_FAST_SYSCALL_XTENSA */           
1207                                                  
1208 ENTRY(fast_syscall_xtensa)                       
1209                                                  
1210         l32i    a0, a2, PT_AREG0        # res    
1211         movi    a2, -ENOSYS                      
1212         rfe                                      
1213                                                  
1214 ENDPROC(fast_syscall_xtensa)                     
1215                                                  
1216 #endif /* CONFIG_FAST_SYSCALL_XTENSA */          
1217                                                  
1218                                                  
1219 /* fast_syscall_spill_registers.                 
1220  *                                               
1221  * Entry condition:                              
1222  *                                               
1223  *   a0:        trashed, original value saved on stack (PT_AREG0)
1224  *   a1:        a1
1225  *   a2:        new stack pointer, original in DEPC
1226  *   a3:        a3
1227  *   depc:      a2, original value saved on stack (PT_DEPC)
1228  *   excsave_1: dispatch table
1229  *
1230  * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
1231  */                                              
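/*
 * User-space view (a sketch, not part of the kernel sources): as the
 * dispatch in fast_syscall_user shows, a system call with syscall number 0
 * lands here and forces all of the caller's live register windows out to
 * their stack save areas, e.g.:
 *
 *	static void spill_register_windows(void)
 *	{
 *		register unsigned long a2 __asm__("a2") = 0;
 *
 *		__asm__ volatile ("syscall" : "+r" (a2) :: "memory");
 *	}
 *
 * which is useful for code that wants to walk or copy its own stack frames
 * (unwinders, garbage collectors and the like).
 */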
1232                                                  
1233 #if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) || \
1234                 defined(USER_SUPPORT_WINDOWED)
1235                                                  
1236 ENTRY(fast_syscall_spill_registers)              
1237                                                  
1238         /* Register a FIXUP handler (pass current wb as a parameter) */
1239                                                  
1240         xsr     a3, excsave1                     
1241         movi    a0, fast_syscall_spill_registers_fixup
1242         s32i    a0, a3, EXC_TABLE_FIXUP          
1243         rsr     a0, windowbase                   
1244         s32i    a0, a3, EXC_TABLE_PARAM          
1245         xsr     a3, excsave1            # res    
1246                                                  
1247         /* Save a3, a4 and SAR on stack. */      
1248                                                  
1249         rsr     a0, sar                          
1250         s32i    a3, a2, PT_AREG3                 
1251         s32i    a0, a2, PT_SAR                   
1252                                                  
1253         /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
1254                                                  
1255         s32i    a4, a2, PT_AREG4                 
1256         s32i    a7, a2, PT_AREG7                 
1257         s32i    a8, a2, PT_AREG8                 
1258         s32i    a11, a2, PT_AREG11               
1259         s32i    a12, a2, PT_AREG12               
1260         s32i    a15, a2, PT_AREG15               
1261                                                  
1262         /*                                       
1263          * Rotate ws so that the current windowbase is at bit 0.
1264          * Assume ws = xxxwww1yy (www1 current window frame).
1265          * Rotate ws right so that a4 = yyxxxwww1.
1266          */                                      
1267                                                  
1268         rsr     a0, windowbase                   
1269         rsr     a3, windowstart         # a3     
1270         ssr     a0                      # hol    
1271         slli    a0, a3, WSBITS                   
1272         or      a3, a3, a0              # a3     
1273         srl     a3, a3                  # a3     
1274                                                  
1275         /* We are done if there are no more than the current register frame. */
1276                                                  
1277         extui   a3, a3, 1, WSBITS-1     # a3 = 0yyxxxwww
1278         movi    a0, (1 << (WSBITS-1))
1279         _beqz   a3, .Lnospill           # only one active frame? jump
1280                                                  
1281         /* We want 1 at the top, so that we return to the current windowbase */
1282
1283         or      a3, a3, a0              # 1yyxxxwww
1284                                                  
1285         /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
1286
1287         wsr     a3, windowstart         # save shifted windowstart
1288         neg     a0, a3
1289         and     a3, a0, a3              # first bit set from right
1290                                                  
1291         ffs_ws  a0, a3                  # a0: shifts to skip empty frames
1292         movi    a3, WSBITS
1293         sub     a0, a3, a0              # WSBITS-a0: number of 0-bits from right
1294         ssr     a0                      # save in SAR for later
1295                                                  
1296         rsr     a3, windowbase                   
1297         add     a3, a3, a0                       
1298         wsr     a3, windowbase                   
1299         rsync                                    
1300                                                  
1301         rsr     a3, windowstart                  
1302         srl     a3, a3                  # shi    
1303                                                  
1304         /* WB is now just one frame below the oldest frame in the register
1305            window. WS is shifted so the oldest frame is in bit 0, thus, WB
1306            and WS differ by one 4-register frame. */
1307                                                  
1308         /* Save frames. Depending what call was used (call4, call8, call12),
1309          * we have to save 4, 8, or 12 registers.
1310          */                                      
1311                                                  
1312                                                  
1313 .Lloop: _bbsi.l a3, 1, .Lc4                      
1314         _bbci.l a3, 2, .Lc12                     
1315                                                  
1316 .Lc8:   s32e    a4, a13, -16                     
1317         l32e    a4, a5, -12                      
1318         s32e    a8, a4, -32                      
1319         s32e    a5, a13, -12                     
1320         s32e    a6, a13, -8                      
1321         s32e    a7, a13, -4                      
1322         s32e    a9, a4, -28                      
1323         s32e    a10, a4, -24                     
1324         s32e    a11, a4, -20                     
1325         srli    a11, a3, 2              # shi    
1326         rotw    2                                
1327         _bnei   a3, 1, .Lloop                    
1328         j       .Lexit                           
1329                                                  
1330 .Lc4:   s32e    a4, a9, -16                      
1331         s32e    a5, a9, -12                      
1332         s32e    a6, a9, -8                       
1333         s32e    a7, a9, -4                       
1334                                                  
1335         srli    a7, a3, 1                        
1336         rotw    1                                
1337         _bnei   a3, 1, .Lloop                    
1338         j       .Lexit                           
1339                                                  
1340 .Lc12:  _bbci.l a3, 3, .Linvalid_mask   # bit    
1341                                                  
1342         /* 12-register frame (call12) */         
1343                                                  
1344         l32e    a0, a5, -12                      
1345         s32e    a8, a0, -48                      
1346         mov     a8, a0                           
1347                                                  
1348         s32e    a9, a8, -44                      
1349         s32e    a10, a8, -40                     
1350         s32e    a11, a8, -36                     
1351         s32e    a12, a8, -32                     
1352         s32e    a13, a8, -28                     
1353         s32e    a14, a8, -24                     
1354         s32e    a15, a8, -20                     
1355         srli    a15, a3, 3                       
1356                                                  
1357         /* The stack pointer for a4..a7 is out of reach, so we rotate the
1358          * window, grab the stackpointer, and rotate back.
1359          * Alternatively, we could also use the following approach, but that
1360          * makes the fixup routine much more complicated:
1361          * rotw 1                                
1362          * s32e a0, a13, -16                     
1363          * ...                                   
1364          * rotw 2                                
1365          */                                      
1366                                                  
1367         rotw    1                                
1368         mov     a4, a13                          
1369         rotw    -1                               
1370                                                  
1371         s32e    a4, a8, -16                      
1372         s32e    a5, a8, -12                      
1373         s32e    a6, a8, -8                       
1374         s32e    a7, a8, -4                       
1375                                                  
1376         rotw    3                                
1377                                                  
1378         _beqi   a3, 1, .Lexit                    
1379         j       .Lloop                           
1380                                                  
1381 .Lexit:                                          
1382                                                  
1383         /* Done. Do the final rotation and set WS */
1384                                                  
1385         rotw    1                                
1386         rsr     a3, windowbase                   
1387         ssl     a3                               
1388         movi    a3, 1                            
1389         sll     a3, a3                           
1390         wsr     a3, windowstart                  
1391 .Lnospill:                                       
1392                                                  
1393         /* Advance PC, restore registers and SAR, and return from exception. */
1394                                                  
1395         l32i    a3, a2, PT_SAR                   
1396         l32i    a0, a2, PT_AREG0                 
1397         wsr     a3, sar                          
1398         l32i    a3, a2, PT_AREG3                 
1399                                                  
1400         /* Restore clobbered registers. */       
1401                                                  
1402         l32i    a4, a2, PT_AREG4                 
1403         l32i    a7, a2, PT_AREG7                 
1404         l32i    a8, a2, PT_AREG8                 
1405         l32i    a11, a2, PT_AREG11               
1406         l32i    a12, a2, PT_AREG12               
1407         l32i    a15, a2, PT_AREG15               
1408                                                  
1409         movi    a2, 0                            
1410         rfe                                      
1411                                                  
1412 .Linvalid_mask:                                  
1413                                                  
1414         /* We get here because of an unrecoverable error in the window
1415          * registers, so set up a dummy frame and kill the user task.
1416          * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
1417          */                                      
1418                                                  
1419         movi    a0, 1                            
1420         movi    a1, 0                            
1421                                                  
1422         wsr     a0, windowstart                  
1423         wsr     a1, windowbase                   
1424         rsync                                    
1425                                                  
1426         movi    a0, 0                            
1427                                                  
1428         rsr     a3, excsave1                     
1429         l32i    a1, a3, EXC_TABLE_KSTK           
1430                                                  
1431         movi    a4, KERNEL_PS_WOE_MASK | LOCKLEVEL
1432         wsr     a4, ps                           
1433         rsync                                    
1434                                                  
1435         movi    abi_arg0, SIGSEGV                
1436         abi_call        make_task_dead           
1437                                                  
1438         /* shouldn't return, so panic */         
1439                                                  
1440         wsr     a0, excsave1                     
1441         call0   unrecoverable_exception          
1442 1:      j       1b                               
1443                                                  
1444                                                  
1445 ENDPROC(fast_syscall_spill_registers)            
1446                                                  
1447 /* Fixup handler.                                
1448  *                                               
1449  * We get here if the spill routine causes an exception, e.g. tlb miss.
1450  * We basically restore WINDOWBASE and WINDOWSTART to the condition when
1451  * we entered the spill routine and jump to the user exception handler.
1452  *
1453  * Note that we only need to restore the bits in windowstart that have not
1454  * been spilled yet by the _spill_register routine. Luckily, a3 contains a
1455  * rotated windowstart with only those bits set for frames that haven't been
1456  * spilled yet. Because a3 is rotated such that bit 0 represents the register
1457  * frame for the current windowbase - 1, we need to rotate a3 left by the
1458  * value of the current windowbase + 1 and move it back to windowstart.
1459  *                                               
1460  * a0: value of depc, original value in depc     
1461  * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
1462  * a3: exctable, original value in excsave1      
1463  */                                              
1464                                                  
1465 ENTRY(fast_syscall_spill_registers_fixup)        
1466                                                  
1467         rsr     a2, windowbase  # get current windowbase
1468         xsr     a0, depc        # restore depc and a0
1469         ssl     a2              # set shift (32 - WB)
1470                                                  
1471         /* We need to make sure the current registers (a0-a3) are preserved.
1472          * To do this, we simply set the bit for the current window frame
1473          * in WS, so that the exception handlers save them to the task stack.
1474          *
1475          * Note: we use a3 to set the windowbase, so we take special care
1476          * of it, saving it in the original _spill_registers frame across
1477          * the exception handler call.           
1478          */                                      
1479                                                  
1480         xsr     a3, excsave1    # get spill-mask
1481         slli    a3, a3, 1       # shift left by one
1482         addi    a3, a3, 1       # set the bit for the current window frame
1483                                                  
1484         slli    a2, a3, 32-WSBITS                
1485         src     a2, a3, a2      # a2 = xxwww1    
1486         wsr     a2, windowstart # set correct    
1487                                                  
1488         srli    a3, a3, 1                        
1489         rsr     a2, excsave1                     
1490         l32i    a2, a2, EXC_TABLE_DOUBLE_SAVE    
1491         xsr     a2, excsave1                     
1492         s32i    a3, a2, EXC_TABLE_DOUBLE_SAVE    
1493         l32i    a3, a2, EXC_TABLE_PARAM # ori    
1494         xsr     a2, excsave1                     
1495                                                  
1496         /* Return to the original (user task) WINDOWBASE.
1497          * We leave the following frame behind:
1498          * a0, a1, a2   same
1499          * a3:          trashed (saved in EXC_TABLE_DOUBLE_SAVE)
1500          * depc:        depc (we have to return to that address)
1501          * excsave_1:   exctable                 
1502          */                                      
1503                                                  
1504         wsr     a3, windowbase                   
1505         rsync                                    
1506                                                  
1507         /* We are now in the original frame when we entered _spill_registers:
1508          *  a0: return address                   
1509          *  a1: used, stack pointer              
1510          *  a2: kernel stack pointer             
1511          *  a3: available                        
1512          *  depc: exception address              
1513          *  excsave: exctable                    
1514          * Note: This frame might be the same as above.
1515          */                                      
1516                                                  
1517         /* Setup stack pointer. */               
1518                                                  
1519         addi    a2, a2, -PT_USER_SIZE            
1520         s32i    a0, a2, PT_AREG0                 
1521                                                  
1522         /* Make sure we return to this fixup handler. */
1523                                                  
1524         movi    a3, fast_syscall_spill_registers_fixup_return
1525         s32i    a3, a2, PT_DEPC         # setup depc
1526                                                  
1527         /* Jump to the exception handler. */     
1528                                                  
1529         rsr     a3, excsave1                     
1530         rsr     a0, exccause                     
1531         addx4   a0, a0, a3                       
1532         l32i    a0, a0, EXC_TABLE_FAST_USER      
1533         l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE    
1534         jx      a0                               
1535                                                  
1536 ENDPROC(fast_syscall_spill_registers_fixup)      
1537                                                  
1538 ENTRY(fast_syscall_spill_registers_fixup_return)
1539                                                  
1540         /* When we return here, all registers were restored to their original values. */
1541                                                  
1542         wsr     a2, depc                # exc    
1543                                                  
1544         /* Restore fixup handler. */             
1545                                                  
1546         rsr     a2, excsave1                     
1547         s32i    a3, a2, EXC_TABLE_DOUBLE_SAVE    
1548         movi    a3, fast_syscall_spill_registers_fixup
1549         s32i    a3, a2, EXC_TABLE_FIXUP          
1550         rsr     a3, windowbase                   
1551         s32i    a3, a2, EXC_TABLE_PARAM          
1552         l32i    a2, a2, EXC_TABLE_KSTK           
1553                                                  
1554         /* Load WB at the time the exception occurred. */
1555                                                  
1556         rsr     a3, sar                 # WB     
1557         neg     a3, a3                           
1558         wsr     a3, windowbase                   
1559         rsync                                    
1560                                                  
1561         rsr     a3, excsave1                     
1562         l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE    
1563                                                  
1564         rfde                                     
1565                                                  
1566 ENDPROC(fast_syscall_spill_registers_fixup_return)
1567                                                  
1568 #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS || USER_SUPPORT_WINDOWED */
1569                                                  
1570 ENTRY(fast_syscall_spill_registers)              
1571                                                  
1572         l32i    a0, a2, PT_AREG0        # res    
1573         movi    a2, -ENOSYS                      
1574         rfe                                      
1575                                                  
1576 ENDPROC(fast_syscall_spill_registers)            
1577                                                  
1578 #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS || USER_SUPPORT_WINDOWED */
1579                                                  
1580 #ifdef CONFIG_MMU                                
1581 /*                                               
1582  * We should never get here. Bail out!           
1583  */                                              
1584                                                  
1585 ENTRY(fast_second_level_miss_double_kernel)      
1586                                                  
1587 1:                                               
1588         call0   unrecoverable_exception          
1589 1:      j       1b                               
1590                                                  
1591 ENDPROC(fast_second_level_miss_double_kernel)    
1592                                                  
1593 /* First-level entry handler for user, kernel, and double 2nd-level
1594  * TLB miss exceptions.  Note that for now, user and kernel miss
1595  * exceptions share the same entry point and are handled identically.
1596  *
1597  * An old, less-efficient C version of this function used to exist.
1598  * We include it below, interleaved as comments, for reference.
1599  *                                               
1600  * Entry condition:                              
1601  *                                               
1602  *   a0:        trashed, original value saved on stack (PT_AREG0)
1603  *   a1:        a1
1604  *   a2:        new stack pointer, original in DEPC
1605  *   a3:        a3
1606  *   depc:      a2, original value saved on stack (PT_DEPC)
1607  *   excsave_1: dispatch table
1608  *
1609  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1610  *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1611  */                                              
1612                                                  
1613 ENTRY(fast_second_level_miss)                    
1614                                                  
1615         /* Save a1 and a3. Note: we don't expect a double exception. */
1616                                                  
1617         s32i    a1, a2, PT_AREG1                 
1618         s32i    a3, a2, PT_AREG3                 
1619                                                  
1620         /* We need to map the page of PTEs for the user task.  Find
1621          * the pointer to that page.  Also, it's possible for tsk->mm
1622          * to be NULL while tsk->active_mm is nonzero if we faulted on
1623          * a vmalloc address.  In that rare case, we must use
1624          * active_mm instead to avoid a fault in this handler.  See
1625          *                                       
1626          * http://mail.nl.linux.org/linux-mm/    
1627          *   (or search Internet on "mm vs. active_mm")
1628          *                                       
1629          *      if (!mm)                         
1630          *              mm = tsk->active_mm;     
1631          *      pgd = pgd_offset (mm, regs->excvaddr);
1632          *      pmd = pmd_offset (pgd, regs->excvaddr);
1633          *      pmdval = *pmd;                   
1634          */                                      
1635                                                  
1636         GET_CURRENT(a1,a2)                       
1637         l32i    a0, a1, TASK_MM         # tsk->mm
1638         beqz    a0, .Lfast_second_level_miss_no_mm
1639                                                  
1640 .Lfast_second_level_miss_continue:               
1641         rsr     a3, excvaddr            # fault address
1642         _PGD_OFFSET(a0, a3, a1)
1643         l32i    a0, a0, 0               # read pmdval
1644         beqz    a0, .Lfast_second_level_miss_no_pmd
1645                                                  
1646         /* Read ptevaddr and convert to top of page-table page.
1647          *
1648          *      vpnval = read_ptevaddr_register() & PAGE_MASK;
1649          *      vpnval += DTLB_WAY_PGTABLE;
1650          *      pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
1651          *      write_dtlb_entry (pteval, vpnval);
1652          *
1653          * The messy computation for 'pteval' above really simplifies
1654          * into the following:
1655          *
1656          * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
1657          *                 | PAGE_DIRECTORY      
1658          */                                      
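        /*
         * Worked example (values are illustrative; PAGE_OFFSET and
         * PHYS_OFFSET depend on the platform configuration): with
         * PAGE_OFFSET = 0xd0000000, PHYS_OFFSET = 0, PAGE_SHIFT = 12
         * and pmdval = 0xd7654321,
         *
         *      pteval = ((0xd7654321 - 0xd0000000 + 0) & 0xfffff000)
         *                      | PAGE_DIRECTORY
         *             = 0x07654000 | PAGE_DIRECTORY
         *
         * i.e. the physical frame of the PTE page plus the attribute bits,
         * which is what the movi/add/extui/xor sequence below computes
         * without ever touching a struct page.
         */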
1659                                                  
1660         movi    a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
1661         add     a0, a0, a1              # pmdval - PAGE_OFFSET
1662         extui   a1, a0, 0, PAGE_SHIFT   # ... & PAGE_MASK
1663         xor     a0, a0, a1                       
1664                                                  
1665         movi    a1, _PAGE_DIRECTORY              
1666         or      a0, a0, a1              # ... | PAGE_DIRECTORY
1667                                                  
1668         /*                                       
1669          * We utilize all three wired-ways (7-9) to hold the pmd page.
1670          * Memory regions are mapped to the DTLBs according to bits 28 and 29.
1671          * This allows us to map the three most common regions to three different
1672          * DTLB ways:
1673          *  0,1 -> way 7        program (0040.0000) and virtual (c000.0000)
1674          *  2   -> way 8        shared libraries (2000.0000)
1675          *  3   -> way 9        stack (3000.0000)
1676          */                                      
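        /*
         * The way selection below can be written in C as (a sketch;
         * DTLB_WAY_PGD is the first of the three wired ways):
         *
         *      unsigned int region = (vaddr >> 28) & 3;
         *      unsigned int way = DTLB_WAY_PGD + (region * 3) / 4;
         *
         * which yields way offsets 0, 0, 1, 2 for regions 0..3 and matches
         * the extui/addx2/extui arithmetic in the instructions that follow.
         */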
1677                                                  
1678         extui   a3, a3, 28, 2           # addr. bit 28 and 29   0,1,2,3
1679         rsr     a1, ptevaddr
1680         addx2   a3, a3, a3              # ->                    0,3,6,9
1681         srli    a1, a1, PAGE_SHIFT
1682         extui   a3, a3, 2, 2            # ->                    0,0,1,2
1683         slli    a1, a1, PAGE_SHIFT      # ptevaddr & PAGE_MASK
1684         addi    a3, a3, DTLB_WAY_PGD
1685         add     a1, a1, a3              # ... + way_number
1686                                                  
1687 .Lfast_second_level_miss_wdtlb:                  
1688         wdtlb   a0, a1                           
1689         dsync                                    
1690                                                  
1691         /* Exit critical section. */             
1692 .Lfast_second_level_miss_skip_wdtlb:             
1693         rsr     a3, excsave1                     
1694         movi    a0, 0                            
1695         s32i    a0, a3, EXC_TABLE_FIXUP          
1696                                                  
1697         /* Restore the working registers, and return. */
1698                                                  
1699         l32i    a0, a2, PT_AREG0                 
1700         l32i    a1, a2, PT_AREG1                 
1701         l32i    a3, a2, PT_AREG3                 
1702         l32i    a2, a2, PT_DEPC                  
1703                                                  
1704         bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1705                                                  
1706         /* Restore excsave1 and return. */       
1707                                                  
1708         rsr     a2, depc                         
1709         rfe                                      
1710                                                  
1711         /* Return from double exception. */      
1712                                                  
1713 1:      xsr     a2, depc                         
1714         esync                                    
1715         rfde                                     
1716                                                  
1717 .Lfast_second_level_miss_no_mm:                  
1718         l32i    a0, a1, TASK_ACTIVE_MM  # unlikely, but possible
1719         bnez    a0, .Lfast_second_level_miss_continue
1720
1721         /* Even more unlikely case active_mm == 0.
1722          * We can get here with NMI in the middle of context_switch that
1723          * touches vmalloc area.
1724          */
1725         movi    a0, init_mm
1726         j       .Lfast_second_level_miss_continue
1727                                                  
1728 .Lfast_second_level_miss_no_pmd:                 
1729 #if (DCACHE_WAY_SIZE > PAGE_SIZE)                
1730                                                  
1731         /* Special case for cache aliasing.      
1732          * We (should) only get here if a clear_user_page, copy_user_page
1733          * or the aliased cache flush functions got preemptively interrupted
1734          * by another task. Re-establish temporary mapping to the
1735          * TLBTEMP_BASE areas.                   
1736          */                                      
1737                                                  
1738         /* We shouldn't be in a double exception */
1739                                                  
1740         l32i    a0, a2, PT_DEPC                  
1741         bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow
1742                                                  
1743         /* Make sure the exception originated in the special functions */
1744                                                  
1745         movi    a0, __tlbtemp_mapping_start      
1746         rsr     a3, epc1                         
1747         bltu    a3, a0, .Lfast_second_level_miss_slow
1748         movi    a0, __tlbtemp_mapping_end
1749         bgeu    a3, a0, .Lfast_second_level_miss_slow
1750                                                  
1751         /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
1752                                                  
1753         movi    a3, TLBTEMP_BASE_1               
1754         rsr     a0, excvaddr                     
1755         bltu    a0, a3, .Lfast_second_level_miss_slow
1756                                                  
1757         addi    a1, a0, -TLBTEMP_SIZE            
1758         bgeu    a1, a3, .Lfast_second_level_miss_slow
1759                                                  
1760         /* Check if we have to restore an ITLB mapping. */
1761                                                  
1762         movi    a1, __tlbtemp_mapping_itlb       
1763         rsr     a3, epc1                         
1764         sub     a3, a3, a1                       
1765                                                  
1766         /* Calculate VPN */                      
1767                                                  
1768         movi    a1, PAGE_MASK                    
1769         and     a1, a1, a0                       
1770                                                  
1771         /* Jump for ITLB entry */                
1772                                                  
1773         bgez    a3, 1f                           
1774                                                  
1775         /* We can use up to two TLBTEMP areas, one for src and one for dst. */
1776                                                  
1777         extui   a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
1778         add     a1, a3, a1                       
1779                                                  
1780         /* PPN is in a6 for the first TLBTEMP area and in a7 for the second one. */
1781                                                  
1782         mov     a0, a6                           
1783         movnez  a0, a7, a3                       
1784         j       .Lfast_second_level_miss_wdtlb
1785                                                  
1786         /* ITLB entry. We only use dst in a6. */
1787                                                  
1788 1:      witlb   a6, a1                           
1789         isync                                    
1790         j       .Lfast_second_level_miss_skip_wdtlb
1791                                                  
1792                                                  
1793 #endif  // DCACHE_WAY_SIZE > PAGE_SIZE           
1794                                                  
1795         /* Invalid PGD, default exception handling */
1796 .Lfast_second_level_miss_slow:                   
1797                                                  
1798         rsr     a1, depc                         
1799         s32i    a1, a2, PT_AREG2                 
1800         mov     a1, a2                           
1801                                                  
1802         rsr     a2, ps                           
1803         bbsi.l  a2, PS_UM_BIT, 1f                
1804         call0   _kernel_exception                
1805 1:      call0   _user_exception                  
1806                                                  
1807 ENDPROC(fast_second_level_miss)                  
1808                                                  
1809 /*                                               
1810  * StoreProhibitedException                      
1811  *                                               
1812  * Update the pte and invalidate the itlb mapping for this pte.
1813  *                                               
1814  * Entry condition:                              
1815  *                                               
1816  *   a0:        trashed, original value saved on stack (PT_AREG0)
1817  *   a1:        a1
1818  *   a2:        new stack pointer, original in DEPC
1819  *   a3:        a3
1820  *   depc:      a2, original value saved on stack (PT_DEPC)
1821  *   excsave_1: dispatch table
1822  *
1823  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
1824  *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
1825  */                                              
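/*
 * The fast path below corresponds roughly to this C sketch (helper names
 * are generic; only the bit masks come from this file):
 *
 *	pte_t pte = *ptep;
 *
 *	if ((pte_val(pte) & _PAGE_CA_INVALID) == _PAGE_CA_INVALID ||
 *	    !(pte_val(pte) & BIT(_PAGE_WRITABLE_BIT)))
 *		goto slow;
 *	pte = __pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE);
 *	*ptep = pte;
 *
 * followed by refreshing the DTLB entry for the faulting address and
 * retrying the store.  Anything else is handed to the regular kernel or
 * user exception path and handled by the C page fault code.
 */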
1826                                                  
1827 ENTRY(fast_store_prohibited)                     
1828                                                  
1829         /* Save a1 and a3. */                    
1830                                                  
1831         s32i    a1, a2, PT_AREG1                 
1832         s32i    a3, a2, PT_AREG3                 
1833                                                  
1834         GET_CURRENT(a1,a2)                       
1835         l32i    a0, a1, TASK_MM         # tsk->mm
1836         beqz    a0, .Lfast_store_no_mm           
1837                                                  
1838 .Lfast_store_continue:                           
1839         rsr     a1, excvaddr            # fault address
1840         _PGD_OFFSET(a0, a1, a3)                  
1841         l32i    a0, a0, 0                        
1842         beqz    a0, .Lfast_store_slow            
1843                                                  
1844         /*                                       
1845          * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
1846          * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
1847          */                                      
1848                                                  
1849         _PTE_OFFSET(a0, a1, a3)                  
1850         l32i    a3, a0, 0               # read pteval
1851         movi    a1, _PAGE_CA_INVALID
1852         ball    a3, a1, .Lfast_store_slow
1853         bbci.l  a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow
1854
1855         movi    a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
1856         or      a3, a3, a1                       
1857         rsr     a1, excvaddr                     
1858         s32i    a3, a0, 0                        
1859                                                  
1860         /* We need to flush the cache if we have page coloring. */
1861 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
1862         dhwb    a0, 0                            
1863 #endif                                           
1864         pdtlb   a0, a1                           
1865         wdtlb   a3, a0                           
1866                                                  
1867         /* Exit critical section. */             
1868                                                  
1869         movi    a0, 0                            
1870         rsr     a3, excsave1                     
1871         s32i    a0, a3, EXC_TABLE_FIXUP          
1872                                                  
1873         /* Restore the working registers, and return. */
1874                                                  
1875         l32i    a3, a2, PT_AREG3                 
1876         l32i    a1, a2, PT_AREG1                 
1877         l32i    a0, a2, PT_AREG0                 
1878         l32i    a2, a2, PT_DEPC                  
1879                                                  
1880         bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
1881         rsr     a2, depc                         
1882         rfe                                      
1883                                                  
1884         /* Double exception. Restore FIXUP handler and return. */
1885                                                  
1886 1:      xsr     a2, depc                         
1887         esync                                    
1888         rfde                                     
1889                                                  
1890 .Lfast_store_no_mm:                              
1891         l32i    a0, a1, TASK_ACTIVE_MM  # unlikely, but possible
1892         j       .Lfast_store_continue            
1893                                                  
1894         /* If there was a problem, handle fault in C. */
1895 .Lfast_store_slow:                               
1896         rsr     a1, excvaddr                     
1897         pdtlb   a0, a1                           
1898         bbci.l  a0, DTLB_HIT_BIT, 1f             
1899         idtlb   a0                               
1900 1:                                               
1901         rsr     a3, depc        # still holds    
1902         s32i    a3, a2, PT_AREG2                 
1903         mov     a1, a2                           
1904                                                  
1905         rsr     a2, ps                           
1906         bbsi.l  a2, PS_UM_BIT, 1f                
1907         call0   _kernel_exception                
1908 1:      call0   _user_exception                  
1909                                                  
1910 ENDPROC(fast_store_prohibited)                   
1911                                                  
1912 #endif /* CONFIG_MMU */                          
1913                                                  
1914         .text                                    
1915 /*                                               
1916  * System Calls.                                 
1917  *                                               
1918  * void system_call (struct pt_regs* regs, int exccause);
1919  *                            a2                 a3
1920  */                                              
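/*
 * In C the body below behaves roughly like this sketch (syscall tracing
 * and the CALL0-ABI prologue/epilogue are omitted):
 *
 *	void system_call(struct pt_regs *regs, int exccause)
 *	{
 *		unsigned int nr = regs->areg[2];
 *		long ret = -ENOSYS;
 *
 *		regs->syscall = nr;
 *		if (nr < __NR_syscalls)
 *			ret = sys_call_table[nr](regs->areg[6], regs->areg[3],
 *						 regs->areg[4], regs->areg[5],
 *						 regs->areg[8], regs->areg[9]);
 *		regs->areg[2] = ret;
 *	}
 *
 * The six argument registers mirror the PT_AREG* loads further down.
 */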
1921         .literal_position                        
1922                                                  
1923 ENTRY(system_call)                               
1924                                                  
1925 #if defined(__XTENSA_WINDOWED_ABI__)             
1926         abi_entry_default                        
1927 #elif defined(__XTENSA_CALL0_ABI__)              
1928         abi_entry(12)                            
1929                                                  
1930         s32i    a0, sp, 0                        
1931         s32i    abi_saved0, sp, 4                
1932         s32i    abi_saved1, sp, 8                
1933         mov     abi_saved0, a2                   
1934 #else                                            
1935 #error Unsupported Xtensa ABI                    
1936 #endif                                           
1937                                                  
1938         /* regs->syscall = regs->areg[2] */      
1939                                                  
1940         l32i    a7, abi_saved0, PT_AREG2         
1941         s32i    a7, abi_saved0, PT_SYSCALL       
1942                                                  
1943         GET_THREAD_INFO(a4, a1)                  
1944         l32i    abi_saved1, a4, TI_FLAGS         
1945         movi    a4, _TIF_WORK_MASK               
1946         and     abi_saved1, abi_saved1, a4       
1947         beqz    abi_saved1, 1f                   
1948                                                  
1949         mov     abi_arg0, abi_saved0             
1950         abi_call        do_syscall_trace_enter
1951         beqz    abi_rv, .Lsyscall_exit           
1952         l32i    a7, abi_saved0, PT_SYSCALL       
1953                                                  
1954 1:                                               
1955         /* syscall = sys_call_table[syscall_nr] */
1956                                                  
1957         movi    a4, sys_call_table               
1958         movi    a5, __NR_syscalls                
1959         movi    abi_rv, -ENOSYS                  
1960         bgeu    a7, a5, 1f                       
1961                                                  
1962         addx4   a4, a7, a4                       
1963         l32i    abi_tmp0, a4, 0                  
1964                                                  
1965         /* Load args: arg0 - arg5 are passed via regs. */
1966                                                  
1967         l32i    abi_arg0, abi_saved0, PT_AREG6
1968         l32i    abi_arg1, abi_saved0, PT_AREG3
1969         l32i    abi_arg2, abi_saved0, PT_AREG4
1970         l32i    abi_arg3, abi_saved0, PT_AREG5
1971         l32i    abi_arg4, abi_saved0, PT_AREG8
1972         l32i    abi_arg5, abi_saved0, PT_AREG9
1973                                                  
1974         abi_callx       abi_tmp0                 
1975                                                  
1976 1:      /* regs->areg[2] = return_value */       
1977                                                  
1978         s32i    abi_rv, abi_saved0, PT_AREG2     
1979         bnez    abi_saved1, 1f                   
1980 .Lsyscall_exit:                                  
1981 #if defined(__XTENSA_WINDOWED_ABI__)             
1982         abi_ret_default                          
1983 #elif defined(__XTENSA_CALL0_ABI__)              
1984         l32i    a0, sp, 0                        
1985         l32i    abi_saved0, sp, 4                
1986         l32i    abi_saved1, sp, 8                
1987         abi_ret(12)                              
1988 #else                                            
1989 #error Unsupported Xtensa ABI                    
1990 #endif                                           
1991                                                  
1992 1:                                               
1993         mov     abi_arg0, abi_saved0             
1994         abi_call        do_syscall_trace_leave
1995         j       .Lsyscall_exit                   
1996                                                  
1997 ENDPROC(system_call)                             
1998                                                  
1999 /*                                               
2000  * Spill live registers on the kernel stack macro.
2001  *
2002  * Entry condition: ps.woe is set, ps.excm is cleared
2003  * Exit condition: windowstart has single bit set
2004  * May clobber: a12, a13                         
2005  */                                              
2006         .macro  spill_registers_kernel           
2007                                                  
2008 #if XCHAL_NUM_AREGS > 16                         
2009         call12  1f                               
2010         _j      2f                               
2011         retw                                     
2012         .align  4                                
2013 1:                                               
2014         _entry  a1, 48                           
2015         addi    a12, a0, 3                       
2016 #if XCHAL_NUM_AREGS > 32                         
2017         .rept   (XCHAL_NUM_AREGS - 32) / 12      
2018         _entry  a1, 48                           
2019         mov     a12, a0                          
2020         .endr                                    
2021 #endif                                           
2022         _entry  a1, 16                           
2023 #if XCHAL_NUM_AREGS % 12 == 0                    
2024         mov     a8, a8                           
2025 #elif XCHAL_NUM_AREGS % 12 == 4                  
2026         mov     a12, a12                         
2027 #elif XCHAL_NUM_AREGS % 12 == 8                  
2028         mov     a4, a4                           
2029 #endif                                           
2030         retw                                     
2031 2:                                               
2032 #else                                            
2033         mov     a12, a12                         
2034 #endif                                           
2035         .endm                                    
2036                                                  
2037 /*                                               
2038  * Task switch.                                  
2039  *                                               
2040  * struct task*  _switch_to (struct task* prev, struct task* next)
2041  *         a2                              a2                 a3
2042  */                                              
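/*
 * In outline (a sketch; coprocessor state, CPENABLE, the stack canary and
 * the register-window spill are handled by the assembly below):
 *
 *	struct task_struct *_switch_to(struct task_struct *prev,
 *				       struct task_struct *next)
 *	{
 *		prev->thread.ra = <return address>;
 *		prev->thread.sp = <current stack pointer>;
 *		<switch to next->thread.sp and resume at next->thread.ra>;
 *		return prev;
 *	}
 *
 * The saved ra/sp pair is what a later switch back to 'prev' resumes from.
 */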
2043                                                  
2044 ENTRY(_switch_to)                                
2045                                                  
2046 #if defined(__XTENSA_WINDOWED_ABI__)             
2047         abi_entry(XTENSA_SPILL_STACK_RESERVE)    
2048 #elif defined(__XTENSA_CALL0_ABI__)              
2049         abi_entry(16)                            
2050                                                  
2051         s32i    a12, sp, 0                       
2052         s32i    a13, sp, 4                       
2053         s32i    a14, sp, 8                       
2054         s32i    a15, sp, 12                      
2055 #else                                            
2056 #error Unsupported Xtensa ABI                    
2057 #endif                                           
2058         mov     a11, a3                 # and 'next' (a3)
2059                                                  
2060         l32i    a4, a2, TASK_THREAD_INFO         
2061         l32i    a5, a3, TASK_THREAD_INFO         
2062                                                  
2063         save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
2064                                                  
2065 #if THREAD_RA > 1020 || THREAD_SP > 1020         
2066         addi    a10, a2, TASK_THREAD             
2067         s32i    a0, a10, THREAD_RA - TASK_THREAD        # save return address
2068         s32i    a1, a10, THREAD_SP - TASK_THREAD        # save stack pointer
2069 #else                                            
2070         s32i    a0, a2, THREAD_RA       # save return address
2071         s32i    a1, a2, THREAD_SP       # save stack pointer
2072 #endif                                           
2073                                                  
2074 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
2075         movi    a6, __stack_chk_guard            
2076         l32i    a8, a3, TASK_STACK_CANARY        
2077         s32i    a8, a6, 0                        
2078 #endif                                           
2079                                                  
2080         /* Disable ints while we manipulate the stack pointer. */
2081                                                  
2082         irq_save a14, a3                         
2083         rsync                                    
2084                                                  
2085         /* Switch CPENABLE */                    
2086                                                  
2087 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
2088         l32i    a3, a5, THREAD_CPENABLE          
2089 #ifdef CONFIG_SMP                                
2090         beqz    a3, 1f                           
2091         memw                    # pairs with     
2092         l32i    a6, a5, THREAD_CP_OWNER_CPU      
2093         l32i    a7, a5, THREAD_CPU               
2094         beq     a6, a7, 1f      # load 0 into CPENABLE if current CPU is not the owner
2095         movi    a3, 0                            
2096 1:                                               
2097 #endif                                           
2098         wsr     a3, cpenable                     
2099 #endif                                           
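     /*
      * The CPENABLE switch above, in C-like terms (sketch; the field names
      * mirror the THREAD_* asm-offsets, not necessarily the exact struct
      * member spelling):
      *
      *	cpenable = next_ti->cpenable;
      *	if (IS_ENABLED(CONFIG_SMP) && cpenable &&
      *	    next_ti->cp_owner_cpu != next_ti->cpu)
      *		cpenable = 0;	// owned elsewhere: fault on first CP use
      *	write the CPENABLE special register with cpenable;
      */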
2100                                                  
2101 #if XCHAL_HAVE_EXCLUSIVE                         
2102         l32i    a3, a5, THREAD_ATOMCTL8          
2103         getex   a3                               
2104         s32i    a3, a4, THREAD_ATOMCTL8          
2105 #endif                                           
2106                                                  
2107         /* Flush register file. */               
2108                                                  
2109 #if defined(__XTENSA_WINDOWED_ABI__)             
2110         spill_registers_kernel                   
2111 #endif                                           
2112                                                  
2113         /* Set kernel stack (and leave critical section)
2114          * Note: It's safe to set it here. The stack will not be overwritten
2115          *       because the kernel stack will only be used again after
2116          *       we return from kernel space.
2117          */                                      
2118                                                  
2119         rsr     a3, excsave1            # exc_table
2120         addi    a7, a5, PT_REGS_OFFSET           
2121         s32i    a7, a3, EXC_TABLE_KSTK           
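     /*
      * In effect (sketch): point the per-CPU exception table's kernel-stack
      * entry at 'next's pt_regs area, i.e. roughly
      * exc_table[EXC_TABLE_KSTK] = next_thread_info + PT_REGS_OFFSET.
      */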
2122                                                  
2123         /* restore context of the task 'next' */
2124                                                  
2125         l32i    a0, a11, THREAD_RA      # restore return address
2126         l32i    a1, a11, THREAD_SP      # restore stack pointer
2127                                                  
2128         load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
2129                                                  
2130         wsr     a14, ps                          
2131         rsync                                    
2132                                                  
2133 #if defined(__XTENSA_WINDOWED_ABI__)             
2134         abi_ret(XTENSA_SPILL_STACK_RESERVE)      
2135 #elif defined(__XTENSA_CALL0_ABI__)              
2136         l32i    a12, sp, 0                       
2137         l32i    a13, sp, 4                       
2138         l32i    a14, sp, 8                       
2139         l32i    a15, sp, 12                      
2140         abi_ret(16)                              
2141 #else                                            
2142 #error Unsupported Xtensa ABI                    
2143 #endif                                           
2144                                                  
2145 ENDPROC(_switch_to)                              
2146                                                  
2147 ENTRY(ret_from_fork)                             
2148                                                  
2149         /* void schedule_tail (struct task_struct *prev)
2150          * Note: prev is still in abi_arg0 (return value from fake call frame)
2151          */                                      
2152         abi_call        schedule_tail            
2153                                                  
2154         mov             abi_arg0, a1             
2155         abi_call        do_syscall_trace_leave
2156         j               common_exception_return
2157                                                  
2158 ENDPROC(ret_from_fork)                           
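     /*
      * ret_from_fork in C-like terms (illustrative sketch):
      *
      *	schedule_tail(prev);		// prev still in abi_arg0
      *	do_syscall_trace_leave(regs);	// regs taken from the kernel SP (a1)
      *	// then fall through to common_exception_return to enter user space
      */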
2159                                                  
2160 /*                                               
2161  * Kernel thread creation helper                 
2162  * On entry, set up by copy_thread: abi_saved0 = thread_fn,
2163  * abi_saved1 = thread_fn arg. Left from _switch_to.
2164  */                                              
2165 ENTRY(ret_from_kernel_thread)                    
2166                                                  
2167         abi_call        schedule_tail            
2168         mov             abi_arg0, abi_saved1     
2169         abi_callx       abi_saved0               
2170         j               common_exception_return
2171                                                  
2172 ENDPROC(ret_from_kernel_thread)                  
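     /*
      * ret_from_kernel_thread in C-like terms (illustrative sketch):
      *
      *	schedule_tail(prev);
      *	thread_fn(arg);		// abi_saved0/abi_saved1 set up by copy_thread
      *	// if thread_fn returns, exit to user space via common_exception_return
      */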
2173                                                  
2174 #ifdef CONFIG_HIBERNATION                        
2175                                                  
2176         .section        .bss, "aw"               
2177         .align  4                                
2178 .Lsaved_regs:                                    
2179 #if defined(__XTENSA_WINDOWED_ABI__)             
2180         .fill   2, 4                             
2181 #elif defined(__XTENSA_CALL0_ABI__)              
2182         .fill   6, 4                             
2183 #else                                            
2184 #error Unsupported Xtensa ABI                    
2185 #endif                                           
2186         .align  XCHAL_NCP_SA_ALIGN               
2187 .Lsaved_user_regs:                               
2188         .fill   XTREGS_USER_SIZE, 1              
2189                                                  
2190         .previous                                
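     /*
      * Layout note (derived from the .fill directives above): .Lsaved_regs
      * holds a0/a1 for the windowed ABI, or a0/a1 plus callee-saved a12-a15
      * for CALL0; .Lsaved_user_regs is an XTREGS_USER_SIZE blob for the
      * optional TIE/coprocessor state handled by save_xtregs_user and
      * load_xtregs_user.
      */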
2191                                                  
2192 ENTRY(swsusp_arch_suspend)                       
2193                                                  
2194         abi_entry_default                        
2195                                                  
2196         movi            a2, .Lsaved_regs         
2197         movi            a3, .Lsaved_user_regs    
2198         s32i            a0, a2, 0                
2199         s32i            a1, a2, 4                
2200         save_xtregs_user a3 a4 a5 a6 a7 a8 0     
2201 #if defined(__XTENSA_WINDOWED_ABI__)             
2202         spill_registers_kernel                   
2203 #elif defined(__XTENSA_CALL0_ABI__)              
2204         s32i            a12, a2, 8               
2205         s32i            a13, a2, 12              
2206         s32i            a14, a2, 16              
2207         s32i            a15, a2, 20              
2208 #else                                            
2209 #error Unsupported Xtensa ABI                    
2210 #endif                                           
2211         abi_call        swsusp_save              
2212         mov             a2, abi_rv               
2213         abi_ret_default                          
2214                                                  
2215 ENDPROC(swsusp_arch_suspend)                     
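     /*
      * swsusp_arch_suspend in C-like terms (sketch): stash the resume context
      * (return address, stack pointer, callee-saved registers and the xtregs
      * blob) in the static area above, then
      *
      *	return swsusp_save();	// result handed back in abi_rv / a2
      */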
2216                                                  
2217 ENTRY(swsusp_arch_resume)                        
2218                                                  
2219         abi_entry_default                        
2220                                                  
2221 #if defined(__XTENSA_WINDOWED_ABI__)             
2222         spill_registers_kernel                   
2223 #endif                                           
2224                                                  
2225         movi            a2, restore_pblist       
2226         l32i            a2, a2, 0                
2227                                                  
2228 .Lcopy_pbe:                                      
2229         l32i            a3, a2, PBE_ADDRESS      
2230         l32i            a4, a2, PBE_ORIG_ADDRESS
2231                                                  
2232         __loopi         a3, a9, PAGE_SIZE, 16    
2233         l32i            a5, a3, 0                
2234         l32i            a6, a3, 4                
2235         l32i            a7, a3, 8                
2236         l32i            a8, a3, 12               
2237         addi            a3, a3, 16               
2238         s32i            a5, a4, 0                
2239         s32i            a6, a4, 4                
2240         s32i            a7, a4, 8                
2241         s32i            a8, a4, 12               
2242         addi            a4, a4, 16               
2243         __endl          a3, a9                   
2244                                                  
2245         l32i            a2, a2, PBE_NEXT         
2246         bnez            a2, .Lcopy_pbe           
2247                                                  
2248         movi            a2, .Lsaved_regs         
2249         movi            a3, .Lsaved_user_regs    
2250         l32i            a0, a2, 0                
2251         l32i            a1, a2, 4                
2252         load_xtregs_user a3 a4 a5 a6 a7 a8 0     
2253 #if defined(__XTENSA_CALL0_ABI__)                
2254         l32i            a12, a2, 8               
2255         l32i            a13, a2, 12              
2256         l32i            a14, a2, 16              
2257         l32i            a15, a2, 20              
2258 #endif                                           
2259         movi            a2, 0                    
2260         abi_ret_default                          
2261                                                  
2262 ENDPROC(swsusp_arch_resume)                      
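     /*
      * The restore loop above, in C-like terms (sketch using the generic
      * struct pbe from <linux/suspend.h>):
      *
      *	struct pbe *p;
      *
      *	for (p = restore_pblist; p; p = p->next)
      *		memcpy(p->orig_address, p->address, PAGE_SIZE);
      *
      * The __loopi/__endl pair copies each page in 16-byte chunks; the saved
      * registers are then reloaded and 0 is returned in a2.
      */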
2263                                                  
2264 #endif                                           
                                                      
