
TOMOYO Linux Cross Reference
Linux/arch/x86/entry/entry_32.S


  1 /* SPDX-License-Identifier: GPL-2.0 */            
  2 /*                                                
  3  *  Copyright (C) 1991,1992  Linus Torvalds       
  4  *                                                
  5  * entry_32.S contains the system-call and low-level fault and trap handling routines.
  6  *                                                
  7  * Stack layout while running C code:             
  8  *      ptrace needs to have all registers on the stack.
  9  *      If the order here is changed, it needs to be
 10  *      updated in fork.c:copy_process(), signal.c:do_signal(),
 11  *      ptrace.c and ptrace.h                     
 12  *                                                
 13  *       0(%esp) - %ebx                           
 14  *       4(%esp) - %ecx                           
 15  *       8(%esp) - %edx                           
 16  *       C(%esp) - %esi                           
 17  *      10(%esp) - %edi                           
 18  *      14(%esp) - %ebp                           
 19  *      18(%esp) - %eax                           
 20  *      1C(%esp) - %ds                            
 21  *      20(%esp) - %es                            
 22  *      24(%esp) - %fs                            
 23  *      28(%esp) - unused -- was %gs on old stackprotector kernels
 24  *      2C(%esp) - orig_eax                       
 25  *      30(%esp) - %eip                           
 26  *      34(%esp) - %cs                            
 27  *      38(%esp) - %eflags                        
 28  *      3C(%esp) - %oldesp                        
 29  *      40(%esp) - %oldss                         
 30  */                                               
 31                                                   
 32 #include <linux/linkage.h>                        
 33 #include <linux/err.h>                            
 34 #include <asm/thread_info.h>                      
 35 #include <asm/irqflags.h>                         
 36 #include <asm/errno.h>                            
 37 #include <asm/segment.h>                          
 38 #include <asm/smp.h>                              
 39 #include <asm/percpu.h>                           
 40 #include <asm/processor-flags.h>                  
 41 #include <asm/irq_vectors.h>                      
 42 #include <asm/cpufeatures.h>                      
 43 #include <asm/alternative.h>                      
 44 #include <asm/asm.h>                              
 45 #include <asm/smap.h>                             
 46 #include <asm/frame.h>                            
 47 #include <asm/trapnr.h>                           
 48 #include <asm/nospec-branch.h>                    
 49                                                   
 50 #include "calling.h"                              
 51                                                   
 52         .section .entry.text, "ax"                
 53                                                   
 54 #define PTI_SWITCH_MASK         (1 << PAGE_SHIFT)
 55                                                   
 56 /* Unconditionally switch to user cr3 */          
 57 .macro SWITCH_TO_USER_CR3 scratch_reg:req         
 58         ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
 59                                                   
 60         movl    %cr3, \scratch_reg                
 61         orl     $PTI_SWITCH_MASK, \scratch_reg    
 62         movl    \scratch_reg, %cr3                
 63 .Lend_\@:                                         
 64 .endm                                             
 65                                                   
 66 .macro BUG_IF_WRONG_CR3 no_user_check=0           
 67 #ifdef CONFIG_DEBUG_ENTRY                         
 68         ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
 69         .if \no_user_check == 0                   
 70         /* coming from usermode? */               
 71         testl   $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
 72         jz      .Lend_\@                          
 73         .endif                                    
 74         /* On user-cr3? */                        
 75         movl    %cr3, %eax                        
 76         testl   $PTI_SWITCH_MASK, %eax            
 77         jnz     .Lend_\@                          
 78         /* From userspace with kernel cr3 - BUG */
 79         ud2                                       
 80 .Lend_\@:                                         
 81 #endif                                            
 82 .endm                                             
 83                                                   
 84 /*                                                
 85  * Switch to kernel cr3 if not already loaded and return current cr3 in
 86  * \scratch_reg                                   
 87  */                                               
 88 .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req       
 89         ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
 90         movl    %cr3, \scratch_reg
 91         /* Test if we are already on kernel CR3 */
 92         testl   $PTI_SWITCH_MASK, \scratch_reg
 93         jz      .Lend_\@
 94         andl    $(~PTI_SWITCH_MASK), \scratch_reg
 95         movl    \scratch_reg, %cr3
 96         /* Return original CR3 in \scratch_reg */
 97         orl     $PTI_SWITCH_MASK, \scratch_reg    
 98 .Lend_\@:                                         
 99 .endm                                             
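
The two CR3-switch macros above only flip one bit: with PTI the kernel and user page-table roots are allocated as an adjacent pair, so ORing PTI_SWITCH_MASK into CR3 selects the user copy and clearing it selects the kernel copy. A minimal user-space C sketch of that arithmetic (it assumes PAGE_SHIFT == 12, as on 32-bit x86; the CR3 value itself is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PTI_SWITCH_MASK (1u << 12)             /* mirrors (1 << PAGE_SHIFT) above */

    static uint32_t to_user_cr3(uint32_t cr3)   { return cr3 | PTI_SWITCH_MASK;  }
    static uint32_t to_kernel_cr3(uint32_t cr3) { return cr3 & ~PTI_SWITCH_MASK; }

    int main(void)
    {
            uint32_t kernel_cr3 = 0x01000000;      /* hypothetical kernel pgd address */
            uint32_t user_cr3   = to_user_cr3(kernel_cr3);

            printf("kernel cr3 %#x, user cr3 %#x\n", (unsigned)kernel_cr3, (unsigned)user_cr3);
            printf("back to kernel: %#x\n", (unsigned)to_kernel_cr3(user_cr3));
            return 0;
    }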
100                                                   
101 #define CS_FROM_ENTRY_STACK     (1 << 31)         
102 #define CS_FROM_USER_CR3        (1 << 30)         
103 #define CS_FROM_KERNEL          (1 << 29)         
104 #define CS_FROM_ESPFIX          (1 << 28)         
105                                                   
106 .macro FIXUP_FRAME                                
107         /*                                        
108          * The high bits of the CS dword (__csh) are used for CS_FROM_*.
109          * Clear them in case hardware didn't do this for us.
110          */                                       
111         andl    $0x0000ffff, 4*4(%esp)            
112                                                   
113 #ifdef CONFIG_VM86                                
114         testl   $X86_EFLAGS_VM, 5*4(%esp)         
115         jnz     .Lfrom_usermode_no_fixup_\@       
116 #endif                                            
117         testl   $USER_SEGMENT_RPL_MASK, 4*4(%esp)
118         jnz     .Lfrom_usermode_no_fixup_\@       
119                                                   
120         orl     $CS_FROM_KERNEL, 4*4(%esp)        
121                                                   
122         /*                                        
123          * When we're here from kernel mode; the (exception) stack looks like:
124          *                                        
125          *  6*4(%esp) - <previous context>        
126          *  5*4(%esp) - flags                     
127          *  4*4(%esp) - cs                        
128          *  3*4(%esp) - ip                        
129          *  2*4(%esp) - orig_eax                  
130          *  1*4(%esp) - gs / function             
131          *  0*4(%esp) - fs                        
132          *                                        
133          * Lets build a 5 entry IRET frame after that, such that struct pt_regs
134          * is complete and in particular regs->sp is correct. This gives us
135          * the original 6 entries as gap:         
136          *                                        
137          * 14*4(%esp) - <previous context>        
138          * 13*4(%esp) - gap / flags               
139          * 12*4(%esp) - gap / cs                  
140          * 11*4(%esp) - gap / ip                  
141          * 10*4(%esp) - gap / orig_eax            
142          *  9*4(%esp) - gap / gs / function       
143          *  8*4(%esp) - gap / fs                  
144          *  7*4(%esp) - ss                        
145          *  6*4(%esp) - sp                        
146          *  5*4(%esp) - flags                     
147          *  4*4(%esp) - cs                        
148          *  3*4(%esp) - ip                        
149          *  2*4(%esp) - orig_eax                  
150          *  1*4(%esp) - gs / function             
151          *  0*4(%esp) - fs                        
152          */                                       
153                                                   
154         pushl   %ss             # ss              
155         pushl   %esp            # sp (points at ss)
156         addl    $7*4, (%esp)    # point sp back at the previous context
157         pushl   7*4(%esp)       # flags           
158         pushl   7*4(%esp)       # cs              
159         pushl   7*4(%esp)       # ip              
160         pushl   7*4(%esp)       # orig_eax        
161         pushl   7*4(%esp)       # gs / function
162         pushl   7*4(%esp)       # fs              
163 .Lfrom_usermode_no_fixup_\@:                      
164 .endm                                             
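
The push sequence in FIXUP_FRAME is easier to follow with a small model: because every pushl moves %esp down by one slot before the next 7*4(%esp) load, the same displacement walks backwards through flags, cs, ip, orig_eax, gs and fs. A rough, stand-alone C simulation of that walk (slot indices stand in for 4-byte stack words; this is only an illustration, not kernel code):

    #include <stdio.h>

    #define SLOTS 32
    static int stack[SLOTS];
    static int sp = SLOTS - 7;              /* top of the 7-entry frame: fs */

    static void push(int v) { stack[--sp] = v; }

    int main(void)
    {
            const char *name[] = { "fs", "gs / function", "orig_eax", "ip",
                                   "cs", "flags", "<previous context>" };
            int i;

            for (i = 0; i < 7; i++)
                    stack[sp + i] = i;      /* tag slot i*4(%esp) with i */

            push(-1);                       /* pushl %ss (tagged -1)            */
            push(sp);                       /* pushl %esp pushes the old value  */
            stack[sp] += 7;                 /* addl $7*4, (%esp)                */
            for (i = 0; i < 6; i++)
                    push(stack[sp + 7]);    /* pushl 7*4(%esp), six times       */

            for (i = 0; i < 15; i++) {
                    int tag = stack[sp + i];
                    if (tag == -1)
                            printf("%2d*4(%%esp): ss\n", i);
                    else if (tag >= 0 && tag < 7)
                            printf("%2d*4(%%esp): %s\n", i, name[tag]);
                    else    /* the saved stack pointer */
                            printf("%2d*4(%%esp): sp (points at %s)\n", i,
                                   name[stack[tag]]);
            }
            return 0;
    }

Running it prints exactly the second layout shown in the comment above: the new 5-entry IRET frame plus the original entries left as gap.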
165                                                   
166 .macro IRET_FRAME                                 
167         /*                                        
168          * We're called with %ds, %es, %fs, and %gs in the pt_regs
169          * frame, so we shouldn't use them.  Also, we may be in ESPFIX
170          * mode and therefore have a nonzero SS base and an offset ESP,
171          * so any attempt to access the stack needs to use SS.  (except for
172          * accesses through %esp, which automatically use SS.)
173          */                                       
174         testl $CS_FROM_KERNEL, 1*4(%esp)          
175         jz .Lfinished_frame_\@                    
176                                                   
177         /*                                        
178          * Reconstruct the 3 entry IRET frame right after the (modified)
179          * regs->sp without lowering %esp in between, such that an NMI in the
180          * middle doesn't scribble our stack.     
181          */                                       
182         pushl   %eax                              
183         pushl   %ecx                              
184         movl    5*4(%esp), %eax         # (modified) regs->sp
185                                                   
186         movl    4*4(%esp), %ecx         # flags
187         movl    %ecx, %ss:-1*4(%eax)              
188                                                   
189         movl    3*4(%esp), %ecx         # cs      
190         andl    $0x0000ffff, %ecx                 
191         movl    %ecx, %ss:-2*4(%eax)              
192                                                   
193         movl    2*4(%esp), %ecx         # ip      
194         movl    %ecx, %ss:-3*4(%eax)              
195                                                   
196         movl    1*4(%esp), %ecx         # eax     
197         movl    %ecx, %ss:-4*4(%eax)              
198                                                   
199         popl    %ecx                              
200         lea     -4*4(%eax), %esp                  
201         popl    %eax                              
202 .Lfinished_frame_\@:                              
203 .endm                                             
204                                                   
205 .macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
206         cld                                       
207 .if \skip_gs == 0                                 
208         pushl   $0                                
209 .endif                                            
210         pushl   %fs                               
211                                                   
212         pushl   %eax                              
213         movl    $(__KERNEL_PERCPU), %eax          
214         movl    %eax, %fs                         
215 .if \unwind_espfix > 0                            
216         UNWIND_ESPFIX_STACK                       
217 .endif                                            
218         popl    %eax                              
219                                                   
220         FIXUP_FRAME                               
221         pushl   %es                               
222         pushl   %ds                               
223         pushl   \pt_regs_ax                       
224         pushl   %ebp                              
225         pushl   %edi                              
226         pushl   %esi                              
227         pushl   %edx                              
228         pushl   %ecx                              
229         pushl   %ebx                              
230         movl    $(__USER_DS), %edx                
231         movl    %edx, %ds                         
232         movl    %edx, %es                         
233         /* Switch to kernel stack if necessary */
234 .if \switch_stacks > 0                            
235         SWITCH_TO_KERNEL_STACK                    
236 .endif                                            
237 .endm                                             
238                                                   
239 .macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
240         SAVE_ALL unwind_espfix=\unwind_espfix     
241                                                   
242         BUG_IF_WRONG_CR3                          
243                                                   
244         /*                                        
245          * Now switch the CR3 when PTI is enabled.
246          *
247          * We can enter with either user or kernel cr3, the code will
248          * store the old cr3 in \cr3_reg and switch to the kernel cr3
249          * if necessary.                          
250          */                                       
251         SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
252                                                   
253 .Lend_\@:                                         
254 .endm                                             
255                                                   
256 .macro RESTORE_INT_REGS                           
257         popl    %ebx                              
258         popl    %ecx                              
259         popl    %edx                              
260         popl    %esi                              
261         popl    %edi                              
262         popl    %ebp                              
263         popl    %eax                              
264 .endm                                             
265                                                   
266 .macro RESTORE_REGS pop=0                         
267         RESTORE_INT_REGS                          
268 1:      popl    %ds                               
269 2:      popl    %es                               
270 3:      popl    %fs                               
271 4:      addl    $(4 + \pop), %esp       /* pop the unused "gs" slot */
272         IRET_FRAME                                
273                                                   
274         /*                                        
275          * There is no _ASM_EXTABLE_TYPE_REG()    
276          * ASM the registers are known and we     
277          */                                       
278         _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_POP_    
279         _ASM_EXTABLE_TYPE(2b, 3b, EX_TYPE_POP_    
280         _ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_    
281 .endm                                             
282                                                   
283 .macro RESTORE_ALL_NMI cr3_reg:req pop=0          
284         /*                                        
285          * Now switch the CR3 when PTI is enabled.
286          *
287          * We enter with kernel cr3 and switch the cr3 to the value
288          * stored on \cr3_reg, which is either a user or a kernel cr3.
289          */                                       
290         ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
291                                                   
292         testl   $PTI_SWITCH_MASK, \cr3_reg        
293         jz      .Lswitched_\@                     
294                                                   
295         /* User cr3 in \cr3_reg - write it to hardware cr3 */
296         movl    \cr3_reg, %cr3                    
297                                                   
298 .Lswitched_\@:                                    
299                                                   
300         BUG_IF_WRONG_CR3                          
301                                                   
302         RESTORE_REGS pop=\pop                     
303 .endm                                             
304                                                   
305 .macro CHECK_AND_APPLY_ESPFIX                     
306 #ifdef CONFIG_X86_ESPFIX32                        
307 #define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
308 #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page + GDT_ESPFIX_OFFSET)
309                                                   
310         ALTERNATIVE     "jmp .Lend_\@", "", X86_BUG_ESPFIX
311                                                   
312         movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS, SS and CS
313         /*
314          * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
315          * are returning to the kernel.
316          * See comments in process.c:copy_thread() for details.
317          */                                       
318         movb    PT_OLDSS(%esp), %ah               
319         movb    PT_CS(%esp), %al                  
320         andl    $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
321         cmpl    $((SEGMENT_LDT << 8) | USER_RPL), %eax
322         jne     .Lend_\@        # returning to user-space with LDT SS
323                                                   
324         /*                                        
325          * Setup and switch to ESPFIX stack       
326          *                                        
327          * We're returning to userspace with a 16 bit stack. The CPU will not
328          * restore the high word of ESP for us on executing iret... This is an
329          * "official" bug of all the x86-compatible CPUs, which we can work
330          * around to make dosemu and wine happy. We do this by preloading the
331          * high word of ESP with the high word of the userspace ESP while
332          * compensating for the offset by changing to the ESPFIX segment with
333          * a base address that matches for the difference.
334          */                                       
335         mov     %esp, %edx                      /* load kernel esp */
336         mov     PT_OLDESP(%esp), %eax           /* load userspace esp */
337         mov     %dx, %ax                        /* eax: new kernel esp */
338         sub     %eax, %edx                      /* offset (low word is 0) */
339         shr     $16, %edx
340         mov     %dl, GDT_ESPFIX_SS + 4          /* bits 16..23 */
341         mov     %dh, GDT_ESPFIX_SS + 7          /* bits 24..31 */
342         pushl   $__ESPFIX_SS
343         pushl   %eax                            /* new kernel esp */
344         /*                                        
345          * Disable interrupts, but do not irqtrace this section: we
346          * will soon execute iret and the tracer was already set to
347          * the irqstate after the IRET:           
348          */                                       
349         cli                                       
350         lss     (%esp), %esp                      
351 .Lend_\@:                                         
352 #endif /* CONFIG_X86_ESPFIX32 */                  
353 .endm                                             
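
The mov/sub/shr arithmetic above can be checked in isolation: the value pushed for ESP keeps the user's high word and the kernel's low word, and the ESPFIX segment base is set to the (64 KiB-aligned) difference, so base + new ESP still addresses the real kernel stack when the lss reloads SS:ESP. A small C sketch with made-up addresses:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t kernel_esp = 0xc1a3f0b4;   /* made-up kernel stack pointer  */
            uint32_t user_esp   = 0xbf80123c;   /* made-up userspace (16-bit) SP */

            /* mov %esp,%edx ; mov PT_OLDESP(%esp),%eax ; mov %dx,%ax */
            uint32_t new_esp = (user_esp & 0xffff0000) | (kernel_esp & 0xffff);
            /* sub %eax,%edx ; shr $16,%edx -> written to GDT base bits 16..31 */
            uint32_t base = kernel_esp - new_esp;

            assert((base & 0xffff) == 0);          /* low word cancels by construction */
            assert(base + new_esp == kernel_esp);  /* lss lands on the real stack      */

            printf("segment base %#x, new esp %#x\n", (unsigned)base, (unsigned)new_esp);
            return 0;
    }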
354                                                   
355 /*                                                
356  * Called with pt_regs fully populated and kernel segments loaded,
357  * so we can access PER_CPU and use the integer registers.
358  *
359  * We need to be very careful here with the %esp switch, because an NMI
360  * can happen everywhere. If the NMI handler finds itself on the
361  * entry-stack, it will overwrite the task-stack and everything we
362  * copied there. So allocate the stack-frame on the task-stack and
363  * switch to it before we do any copying.         
364  */                                               
365                                                   
366 .macro SWITCH_TO_KERNEL_STACK                     
367                                                   
368         BUG_IF_WRONG_CR3                          
369                                                   
370         SWITCH_TO_KERNEL_CR3 scratch_reg=%eax     
371                                                   
372         /*                                        
373          * %eax now contains the entry cr3 and we carry it forward in
374          * that register for the time this macro runs
375          */                                       
376                                                   
377         /* Are we on the entry stack? Bail out if not! */
378         movl    PER_CPU_VAR(cpu_entry_area), %ecx
379         addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
380         subl    %esp, %ecx      /* ecx = (end of entry_stack) - esp */
381         cmpl    $SIZEOF_entry_stack, %ecx         
382         jae     .Lend_\@                          
383                                                   
384         /* Load stack pointer into %esi and %edi */
385         movl    %esp, %esi                        
386         movl    %esi, %edi                        
387                                                   
388         /* Move %edi to the top of the entry stack */
389         andl    $(MASK_entry_stack), %edi         
390         addl    $(SIZEOF_entry_stack), %edi       
391                                                   
392         /* Load top of task-stack into %edi */    
393         movl    TSS_entry2task_stack(%edi), %edi
394                                                   
395         /* Special case - entry from kernel mode via entry stack */
396 #ifdef CONFIG_VM86                                
397         movl    PT_EFLAGS(%esp), %ecx             
398         movb    PT_CS(%esp), %cl                  
399         andl    $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
400 #else                                             
401         movl    PT_CS(%esp), %ecx                 
402         andl    $SEGMENT_RPL_MASK, %ecx           
403 #endif                                            
404         cmpl    $USER_RPL, %ecx                   
405         jb      .Lentry_from_kernel_\@            
406                                                   
407         /* Bytes to copy */                       
408         movl    $PTREGS_SIZE, %ecx                
409                                                   
410 #ifdef CONFIG_VM86                                
411         testl   $X86_EFLAGS_VM, PT_EFLAGS(%esi)
412         jz      .Lcopy_pt_regs_\@                 
413                                                   
414         /*                                        
415          * Stack-frame contains 4 additional segment registers when
416          * coming from VM86 mode                  
417          */                                       
418         addl    $(4 * 4), %ecx                    
419                                                   
420 #endif                                            
421 .Lcopy_pt_regs_\@:                                
422                                                   
423         /* Allocate frame on task-stack */        
424         subl    %ecx, %edi                        
425                                                   
426         /* Switch to task-stack */                
427         movl    %edi, %esp                        
428                                                   
429         /*                                        
430          * We are now on the task-stack and can safely copy over the
431          * stack-frame                            
432          */                                       
433         shrl    $2, %ecx                          
434         cld                                       
435         rep movsl                                 
436                                                   
437         jmp .Lend_\@                              
438                                                   
439 .Lentry_from_kernel_\@:                           
440                                                   
441         /*                                        
442          * This handles the case when we enter the kernel from
443          * kernel-mode and %esp points to the entry-stack. When this
444          * happens we need to switch to the task-stack to run C code,
445          * but switch back to the entry-stack again when we approach
446          * iret and return to the interrupted code-path. This usually
447          * happens when we hit an exception while restoring user-space
448          * segment registers on the way back to user-space or when the
449          * sysenter handler runs with eflags.tf set.
450          *                                        
451          * When we switch to the task-stack here, we can't trust the
452          * contents of the entry-stack anymore, as the task
453          * might be scheduled out or moved to another CPU. Therefore we
454          * copy the complete entry-stack to the task-stack and set a
455          * marker in the iret-frame (bit 31 of the CS dword) to detect
456          * what we've done on the iret path.      
457          *                                        
458          * On the iret path we copy everything back and switch to the
459          * entry-stack, so that the interrupted kernel code-path
460          * continues on the same stack it was interrupted with.
461          *                                        
462          * Be aware that an NMI can happen anytime in this code.
463          *                                        
464          * %esi: Entry-Stack pointer (same as %esp)
465          * %edi: Top of the task stack            
466          * %eax: CR3 on kernel entry              
467          */                                       
468                                                   
469         /* Calculate number of bytes on the entry stack in %ecx */
470         movl    %esi, %ecx                        
471                                                   
472         /* %ecx to the top of entry-stack */      
473         andl    $(MASK_entry_stack), %ecx         
474         addl    $(SIZEOF_entry_stack), %ecx       
475                                                   
476         /* Number of bytes on the entry stack to %ecx */
477         sub     %esi, %ecx                        
478                                                   
479         /* Mark stackframe as coming from entry stack */
480         orl     $CS_FROM_ENTRY_STACK, PT_CS(%esp)
481                                                   
482         /*                                        
483          * Test the cr3 used to enter the kernel and add a marker
484          * so that we can switch back to it before iret.
485          */                                       
486         testl   $PTI_SWITCH_MASK, %eax            
487         jz      .Lcopy_pt_regs_\@                 
488         orl     $CS_FROM_USER_CR3, PT_CS(%esp)    
489                                                   
490         /*                                        
491          * %esi and %edi are unchanged, %ecx contains the number of
492          * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
493          * the stack-frame on task-stack and copy everything over
494          */                                       
495         jmp .Lcopy_pt_regs_\@                     
496                                                   
497 .Lend_\@:                                         
498 .endm                                             
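
In C terms the macro above does three things: work out how many bytes of saved state sit on the per-CPU entry stack, reserve the same amount just below the top of the task stack, and copy the frame across before %esp is switched. A rough user-space sketch of that flow (buffer sizes and the 17-longword frame are illustrative, not the kernel's real layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ENTRY_STACK_SIZE 512            /* stands in for SIZEOF_entry_stack */

    int main(void)
    {
            uint8_t entry_stack[ENTRY_STACK_SIZE];
            uint8_t task_stack[4096];

            /* Pretend 17 longs of pt_regs were pushed onto the entry stack. */
            size_t frame = 17 * 4;
            uint8_t *esp = entry_stack + ENTRY_STACK_SIZE - frame;
            for (size_t i = 0; i < frame; i++)
                    esp[i] = (uint8_t)i;

            /* "movl TSS_entry2task_stack(%edi), %edi": top of the task stack. */
            uint8_t *task_top = task_stack + sizeof(task_stack);

            /* "subl %ecx, %edi" then "rep movsl": allocate and copy the frame. */
            uint8_t *new_esp = task_top - frame;
            memcpy(new_esp, esp, frame);    /* the asm copies 4 bytes at a time */

            printf("copied %zu bytes, frame intact: %d\n", frame,
                   memcmp(new_esp, esp, frame) == 0);
            return 0;
    }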
499                                                   
500 /*                                                
501  * Switch back from the kernel stack to the entry stack.
502  *
503  * The %esp register must point to pt_regs on the task stack. It will
504  * first calculate the size of the stack-frame to copy, depending on
505  * whether we return to VM86 mode or not. With that it uses 'rep movsl'
506  * to copy the contents of the stack over to the entry stack.
507  *
508  * We must be very careful here, as we can't trust the contents of the
509  * task-stack once we switched to the entry-stack. When an NMI happens
510  * while on the entry-stack, the NMI handler will switch back to the top
511  * of the task stack, overwriting our stack-frame we are about to copy.
512  * Therefore we switch the stack only after everything is copied over.
513  */                                               
514 .macro SWITCH_TO_ENTRY_STACK                      
515                                                   
516         /* Bytes to copy */                       
517         movl    $PTREGS_SIZE, %ecx                
518                                                   
519 #ifdef CONFIG_VM86                                
520         testl   $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
521         jz      .Lcopy_pt_regs_\@                 
522                                                   
523         /* Additional 4 registers to copy when returning to VM86 mode */
524         addl    $(4 * 4), %ecx                    
525                                                   
526 .Lcopy_pt_regs_\@:                                
527 #endif                                            
528                                                   
529         /* Initialize source and destination for movsl */
530         movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
531         subl    %ecx, %edi                        
532         movl    %esp, %esi                        
533                                                   
534         /* Save future stack pointer in %ebx */
535         movl    %edi, %ebx                        
536                                                   
537         /* Copy over the stack-frame */           
538         shrl    $2, %ecx                          
539         cld                                       
540         rep movsl                                 
541                                                   
542         /*                                        
543          * Switch to entry-stack - needs to happen after everything is
544          * copied because the NMI handler will overwrite the task-stack
545          * when on entry-stack                    
546          */                                       
547         movl    %ebx, %esp                        
548                                                   
549 .Lend_\@:                                         
550 .endm                                             
551                                                   
552 /*                                                
553  * This macro handles the case when we return to kernel-mode on the iret
554  * path and have to switch back to the entry stack and/or user-cr3
555  *
556  * See the comments below the .Lentry_from_kernel_\@ label in the
557  * SWITCH_TO_KERNEL_STACK macro for more details.
558  */                                               
559 .macro PARANOID_EXIT_TO_KERNEL_MODE               
560                                                   
561         /*                                        
562          * Test if we entered the kernel with the entry-stack. Most
563          * likely we did not, because this code only runs on the
564          * return-to-kernel path.                 
565          */                                       
566         testl   $CS_FROM_ENTRY_STACK, PT_CS(%esp)
567         jz      .Lend_\@                          
568                                                   
569         /* Unlikely slow-path */                  
570                                                   
571         /* Clear marker from stack-frame */       
572         andl    $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
573                                                   
574         /* Copy the remaining task-stack contents to entry-stack */
575         movl    %esp, %esi                        
576         movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
577                                                   
578         /* Bytes on the task-stack to ecx */      
579         movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
580         subl    %esi, %ecx                        
581                                                   
582         /* Allocate stack-frame on entry-stack */
583         subl    %ecx, %edi                        
584                                                   
585         /*                                        
586          * Save future stack-pointer, we must not switch until the
587          * copy is done, otherwise the NMI handler could destroy the
588          * contents of the task-stack we are about to copy.
589          */                                       
590         movl    %edi, %ebx                        
591                                                   
592         /* Do the copy */                         
593         shrl    $2, %ecx                          
594         cld                                       
595         rep movsl                                 
596                                                   
597         /* Safe to switch to entry-stack now */
598         movl    %ebx, %esp                        
599                                                   
600         /*                                        
601          * We came from entry-stack and need to check if we also need to
602          * switch back to user cr3.               
603          */                                       
604         testl   $CS_FROM_USER_CR3, PT_CS(%esp)    
605         jz      .Lend_\@                          
606                                                   
607         /* Clear marker from stack-frame */       
608         andl    $(~CS_FROM_USER_CR3), PT_CS(%esp)
609                                                   
610         SWITCH_TO_USER_CR3 scratch_reg=%eax       
611                                                   
612 .Lend_\@:                                         
613 .endm                                             
614                                                   
615 /**                                               
616  * idtentry - Macro to generate entry stubs for simple IDT entries
617  * @vector:             Vector number
618  * @asmsym:             ASM symbol for the entry point
619  * @cfunc:              C function to be called
620  * @has_error_code:     Hardware pushed error code on stack
621  */                                               
622 .macro idtentry vector asmsym cfunc has_error_code:req
623 SYM_CODE_START(\asmsym)                           
624         ASM_CLAC                                  
625         cld                                       
626                                                   
627         .if \has_error_code == 0                  
628                 pushl   $0              /* Clear the error code */
629         .endif                                    
630                                                   
631         /* Push the C-function address into the GS slot */
632         pushl   $\cfunc                           
633         /* Invoke the common exception entry */
634         jmp     handle_exception                  
635 SYM_CODE_END(\asmsym)                             
636 .endm                                             
637                                                   
638 .macro idtentry_irq vector cfunc                  
639         .p2align CONFIG_X86_L1_CACHE_SHIFT        
640 SYM_CODE_START_LOCAL(asm_\cfunc)                  
641         ASM_CLAC                                  
642         SAVE_ALL switch_stacks=1                  
643         ENCODE_FRAME_POINTER                      
644         movl    %esp, %eax                        
645         movl    PT_ORIG_EAX(%esp), %edx           
646         movl    $-1, PT_ORIG_EAX(%esp)            
647         call    \cfunc                            
648         jmp     handle_exception_return           
649 SYM_CODE_END(asm_\cfunc)                          
650 .endm                                             
651                                                   
652 /*                                                
653  * Include the defines which emit the idt entries which are
654  * shared between 32 and 64 bit and emit the __irqentry_text_* markers
655  * so the stacktrace boundary checks work.        
656  */                                               
657         .align 16                                 
658         .globl __irqentry_text_start              
659 __irqentry_text_start:                            
660                                                   
661 #include <asm/idtentry.h>                         
662                                                   
663         .align 16                                 
664         .globl __irqentry_text_end                
665 __irqentry_text_end:                              
666                                                   
667 /*                                                
668  * %eax: prev task                                
669  * %edx: next task                                
670  */                                               
671 .pushsection .text, "ax"                          
672 SYM_CODE_START(__switch_to_asm)                   
673         /*                                        
674          * Save callee-saved registers            
675          * This must match the order in struct inactive_task_frame
676          */                                       
677         pushl   %ebp                              
678         pushl   %ebx                              
679         pushl   %edi                              
680         pushl   %esi                              
681         /*                                        
682          * Flags are saved to prevent AC leakage. This could go
683          * away if objtool would have 32bit support to verify
684          * the STAC/CLAC correctness.             
685          */                                       
686         pushfl                                    
687                                                   
688         /* switch stack */                        
689         movl    %esp, TASK_threadsp(%eax)         
690         movl    TASK_threadsp(%edx), %esp         
691                                                   
692 #ifdef CONFIG_STACKPROTECTOR                      
693         movl    TASK_stack_canary(%edx), %ebx     
694         movl    %ebx, PER_CPU_VAR(__stack_chk_guard)
695 #endif                                            
696                                                   
697         /*                                        
698          * When switching from a shallower to a deeper call stack
699          * the RSB may either underflow or use entries populated
700          * with userspace addresses. On CPUs where those concerns
701          * exist, overwrite the RSB with entries which capture
702          * speculative execution to prevent attack.
703          */                                       
704         FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
705                                                   
706         /* Restore flags of the incoming task to restore AC state. */
707         popfl                                     
708         /* restore callee-saved registers */      
709         popl    %esi                              
710         popl    %edi                              
711         popl    %ebx                              
712         popl    %ebp                              
713                                                   
714         jmp     __switch_to                       
715 SYM_CODE_END(__switch_to_asm)                     
716 .popsection                                       
717                                                   
718 /*                                                
719  * A newly forked process directly context switches into this address.
720  *                                                
721  * eax: prev task we switched from                
722  * ebx: kernel thread func (NULL for user thread)
723  * edi: kernel thread arg                         
724  */                                               
725 .pushsection .text, "ax"                          
726 SYM_CODE_START(ret_from_fork_asm)                 
727         movl    %esp, %edx      /* regs */        
728                                                   
729         /* return address for the stack unwinder */
730         pushl   $.Lsyscall_32_done                
731                                                   
732         FRAME_BEGIN                               
733         /* prev already in EAX */                 
734         movl    %ebx, %ecx      /* fn */          
735         pushl   %edi            /* fn_arg */      
736         call    ret_from_fork                     
737         addl    $4, %esp                          
738         FRAME_END                                 
739                                                   
740         RET                                       
741 SYM_CODE_END(ret_from_fork_asm)                   
742 .popsection                                       
743                                                   
744 SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
745 /*                                                
746  * All code from here through __end_SYSENTER_singlestep_region is subject
747  * to being single-stepped if a user program sets TF and executes SYSENTER.
748  * There is absolutely nothing that we can do about this
749  * (thanks Intel!).  To keep our handling of this code as simple as
750  * possible, we handle TF just like AC and NT, except that our #DB handler
751  * will ignore all of the single-step traps generated in this range.
752  */                                               
753                                                   
754 /*                                                
755  * 32-bit SYSENTER entry.                         
756  *                                                
757  * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
758  * if X86_FEATURE_SEP is available.  This is the preferred system call
759  * entry on 32-bit systems.                       
760  *                                                
761  * The SYSENTER instruction, in principle, should *only* occur in the
762  * vDSO.  In practice, a small number of Android devices were shipped
763  * with a copy of Bionic that inlined a SYSENTER instruction.  This
764  * never happened in any of Google's Bionic versions -- it only happened
765  * in a narrow range of Intel-provided versions.
766  *                                                
767  * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
768  * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
769  * SYSENTER does not save anything on the stack,
770  * and does not save old EIP (!!!), ESP, or EFLAGS.
771  *                                                
772  * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
773  * user and/or vm86 state), we explicitly disable the SYSENTER
774  * instruction in vm86 mode by reprogramming the MSRs.
775  *                                                
776  * Arguments:                                     
777  * eax  system call number                        
778  * ebx  arg1                                      
779  * ecx  arg2                                      
780  * edx  arg3                                      
781  * esi  arg4                                      
782  * edi  arg5                                      
783  * ebp  user stack                                
784  * 0(%ebp) arg6                                   
785  */                                               
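
As the comment says, user code is expected to reach this entry through the vDSO's __kernel_vsyscall rather than by issuing SYSENTER itself. A minimal, hypothetical user-space sketch of that convention: look up the entry point via the AT_SYSINFO auxv entry and call it with the system-call number in %eax (assumes glibc's getauxval and a 32-bit build, e.g. gcc -m32; __NR_getpid hard-coded as 20; if AT_SYSINFO is absent the program just exits):

    #include <elf.h>
    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            void *vsys = (void *)getauxval(AT_SYSINFO); /* __kernel_vsyscall    */
            long ret, nr = 20;                          /* __NR_getpid (32-bit) */

            if (!vsys)
                    return 1;                           /* no fast entry available */

            /* arg1..arg6 would go in ebx, ecx, edx, esi, edi, ebp as listed above */
            asm volatile("call *%[vsys]"
                         : "=a" (ret)
                         : "a" (nr), [vsys] "rm" (vsys)
                         : "ecx", "edx", "memory", "cc");

            printf("getpid() via __kernel_vsyscall -> %ld\n", ret);
            return 0;
    }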
786 SYM_FUNC_START(entry_SYSENTER_32)                 
787         /*                                        
788          * On entry-stack with all userspace-regs live - save and
789          * restore eflags and %eax to use it as scratch-reg for the cr3
790          * switch.                                
791          */                                       
792         pushfl                                    
793         pushl   %eax                              
794         BUG_IF_WRONG_CR3 no_user_check=1          
795         SWITCH_TO_KERNEL_CR3 scratch_reg=%eax     
796         popl    %eax                              
797         popfl                                     
798                                                   
799         /* Stack empty again, switch to task stack */
800         movl    TSS_entry2task_stack(%esp), %esp
801                                                   
802 .Lsysenter_past_esp:                              
803         pushl   $__USER_DS              /* pt_regs->ss */
804         pushl   $0                      /* pt_regs->sp (placeholder) */
805         pushfl                          /* pt_regs->flags (except IF = 0) */
806         pushl   $__USER_CS              /* pt_regs->cs */
807         pushl   $0                      /* pt_regs->ip = 0 (placeholder) */
808         pushl   %eax                    /* pt_regs->orig_ax */
809         SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest */
810                                                   
811         /*                                        
812          * SYSENTER doesn't filter flags, so we need to clear NT, AC
813          * and TF ourselves.  To save a few cycles, we can check whether
814          * either was set instead of doing an unconditional popfq.
815          * This needs to happen before enabling interrupts so that
816          * we don't get preempted with NT set.
817          *
818          * If TF is set, we will single-step all the way to here -- the #DB handler
819          * will ignore all the traps.  (Yes, this is slow, but so is
820          * single-stepping in general.  This allows us to avoid having
821          * a more complicated code to handle the case where a user program
822          * forces us to single-step through the SYSENTER entry code.)
823          *
824          * NB.: .Lsysenter_fix_flags is a label with the code under it moved
825          * out-of-line as an optimization: NT is unlikely to be set in the
826          * majority of the cases and instead of polluting the I$ unnecessarily,
827          * we're keeping that code behind a branch which will predict as
828          * not-taken and therefore its instructions won't be fetched.
829          */                                       
830         testl   $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
831         jnz     .Lsysenter_fix_flags              
832 .Lsysenter_flags_fixed:                           
833                                                   
834         movl    %esp, %eax                        
835         call    do_SYSENTER_32                    
836         testb   %al, %al                          
837         jz      .Lsyscall_32_done                 
838                                                   
839         STACKLEAK_ERASE                           
840                                                   
841         /* Opportunistic SYSEXIT */               
842                                                   
843         /*                                        
844          * Setup entry stack - we keep the pointer in %eax and do the
845          * switch after almost all user-state is restored.
846          */                                       
847                                                   
848         /* Load entry stack pointer and allocate frame for eflags/eax */
849         movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
850         subl    $(2*4), %eax                      
851                                                   
852         /* Copy eflags and eax to entry stack */
853         movl    PT_EFLAGS(%esp), %edi             
854         movl    PT_EAX(%esp), %esi                
855         movl    %edi, (%eax)                      
856         movl    %esi, 4(%eax)                     
857                                                   
858         /* Restore user registers and segments */
859         movl    PT_EIP(%esp), %edx      /* pt_regs->ip */
860         movl    PT_OLDESP(%esp), %ecx   /* pt_regs->sp */
861 1:      mov     PT_FS(%esp), %fs                  
862                                                   
863         popl    %ebx                    /* pt_regs->bx */
864         addl    $2*4, %esp              /* skip pt_regs->cx and pt_regs->dx */
865         popl    %esi                    /* pt_regs->si */
866         popl    %edi                    /* pt_regs->di */
867         popl    %ebp                    /* pt_regs->bp */
868                                                   
869         /* Switch to entry stack */               
870         movl    %eax, %esp                        
871                                                   
872         /* Now ready to switch the cr3 */         
873         SWITCH_TO_USER_CR3 scratch_reg=%eax       
874         /* Clobbers ZF */                         
875         CLEAR_CPU_BUFFERS                         
876                                                   
877         /*                                        
878          * Restore all flags except IF. (We restore IF separately because
879          * STI gives a one-instruction window in which we won't be interrupted,
880          * whereas POPF does not.)                
881          */                                       
882         btrl    $X86_EFLAGS_IF_BIT, (%esp)        
883         BUG_IF_WRONG_CR3 no_user_check=1          
884         popfl                                     
885         popl    %eax                              
886                                                   
887         /*                                        
888          * Return back to the vDSO, which will pop ecx and edx.
889          * Don't bother with DS and ES (they already contain __USER_DS).
890          */                                       
891         sti                                       
892         sysexit                                   
893                                                   
894 2:      movl    $0, PT_FS(%esp)                   
895         jmp     1b                                
896         _ASM_EXTABLE(1b, 2b)                      
897                                                   
898 .Lsysenter_fix_flags:                             
899         pushl   $X86_EFLAGS_FIXED                 
900         popfl                                     
901         jmp     .Lsysenter_flags_fixed            
902 SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
903 SYM_FUNC_END(entry_SYSENTER_32)                   
904                                                   
905 /*                                                
906  * 32-bit legacy system call entry.               
907  *                                                
908  * 32-bit x86 Linux system calls traditionally used the INT $0x80
909  * instruction.  INT $0x80 lands here.            
910  *                                                
911  * This entry point can be used by any 32-bit process.
912  * Instances of INT $0x80 can be found inline in various programs and
913  * libraries.  It is also used by the vDSO's __kernel_vsyscall
914  * fallback for hardware that doesn't support a faster entry method.
915  * Restarted 32-bit system calls also fall back to INT $0x80
916  * regardless of what instruction was originally used to do the system
917  * call.  (64-bit programs can use INT $0x80 as well, but they can
918  * only run on 64-bit kernels and therefore land in
919  * entry_INT80_compat.)                           
920  *                                                
921  * This is considered a slow path.  It is not used by most libc
922  * implementations on modern hardware except during process startup.
923  *                                                
924  * Arguments:                                     
925  * eax  system call number                        
926  * ebx  arg1                                      
927  * ecx  arg2                                      
928  * edx  arg3                                      
929  * esi  arg4                                      
930  * edi  arg5                                      
931  * ebp  arg6                                      
932  */                                               
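
For comparison with the register list above, this is what a direct legacy invocation looks like from a 32-bit program. It is illustrative only, not part of the kernel: build with gcc -m32, and __NR_write is hard-coded as 4.

    int main(void)
    {
            static const char msg[] = "hello via int $0x80\n";
            long ret, nr = 4;               /* __NR_write on 32-bit */

            /* eax = nr, ebx = fd, ecx = buf, edx = count, per the list above */
            asm volatile("int $0x80"
                         : "=a" (ret)
                         : "a" (nr), "b" (1), "c" (msg), "d" (sizeof(msg) - 1)
                         : "memory");
            return ret < 0;
    }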
933 SYM_FUNC_START(entry_INT80_32)                    
934         ASM_CLAC                                  
935         pushl   %eax                    /* pt_regs->orig_ax */
936                                                   
937         SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1    /* save rest */
938                                                   
939         movl    %esp, %eax                        
940         call    do_int80_syscall_32               
941 .Lsyscall_32_done:                                
942         STACKLEAK_ERASE                           
943                                                   
944 restore_all_switch_stack:                         
945         SWITCH_TO_ENTRY_STACK                     
946         CHECK_AND_APPLY_ESPFIX                    
947                                                   
948         /* Switch back to user CR3 */             
949         SWITCH_TO_USER_CR3 scratch_reg=%eax       
950                                                   
951         BUG_IF_WRONG_CR3                          
952                                                   
953         /* Restore user state */                  
954         RESTORE_REGS pop=4                        
955         CLEAR_CPU_BUFFERS                         
956 .Lirq_return:                                     
957         /*                                        
958          * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
959          * when returning from IPI handler and when returning from
960          * scheduler to user-space.               
961          */                                       
962         iret                                      
963                                                   
964 .Lasm_iret_error:                                 
965         pushl   $0                                
966         pushl   $iret_error                       
967                                                   
968 #ifdef CONFIG_DEBUG_ENTRY                         
969         /*                                        
970          * The stack-frame here is the one that iret faulted on, so it's a
971          * return-to-user frame. We are on kernel-cr3 because we come here from
972          * the fixup code. This confuses the CR3 checker, so switch to user-cr3
973          * as the checker expects it.             
974          */                                       
975         pushl   %eax                              
976         SWITCH_TO_USER_CR3 scratch_reg=%eax       
977         popl    %eax                              
978 #endif                                            
979                                                   
980         jmp     handle_exception                  
981                                                   
982         _ASM_EXTABLE(.Lirq_return, .Lasm_iret_error)
983 SYM_FUNC_END(entry_INT80_32)                      
984                                                   
985 .macro FIXUP_ESPFIX_STACK                         
986 /*                                                
987  * Switch back for ESPFIX stack to the normal GDT/LDT
988  *                                                
989  * We can't call C functions using the ESPFIX stack. This code reads
990  * the high word of the segment base from the GDT and switches to the
991  * normal stack and adjusts ESP with the matching offset.
992  *                                                
993  * We might be on user CR3 here, so percpu data is not mapped and we can't
994  * access the GDT through the percpu segment.  Instead, use SGDT to find
995  * the cpu_entry_area alias of the GDT.           
996  */                                               
997 #ifdef CONFIG_X86_ESPFIX32                        
998         /* fixup the stack */                     
999         pushl   %ecx                              
1000         subl    $2*4, %esp                       
1001         sgdt    (%esp)                           
1002         movl    2(%esp), %ecx                    
1003         /*                                       
1004          * Careful: ECX is a linear pointer, so we need to force base
1005          * zero.  %cs is the only known-linear segment we have right now.
1006          */                                      
1007         mov     %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al    /* bits 16..23 */
1008         mov     %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah    /* bits 24..31 */
1009         shl     $16, %eax                        
1010         addl    $2*4, %esp                       
1011         popl    %ecx                             
1012         addl    %esp, %eax                     /* the adjusted stack pointer */
1013         pushl   $__KERNEL_DS                     
1014         pushl   %eax                             
1015         lss     (%esp), %esp                   /* switch back to the normal stack */
1016 #endif                                           
1017 .endm                                            
1018                                                  
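
FIXUP_ESPFIX_STACK recovers the linear stack address by pulling bytes 4 and 7 of the espfix GDT descriptor (base bits 16..23 and 24..31), shifting them into the upper half of a register and adding the current ESP. A small C sketch of that arithmetic, under the assumption that gdt_desc points at the 8-byte descriptor located via SGDT (function name and test values are illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Mirror of the FIXUP_ESPFIX_STACK arithmetic: the espfix segment is
	 * arranged so that (high 16 bits of its base << 16) + the current ESP
	 * is the linear address of the real stack slot to LSS back to.
	 */
	static uint32_t espfix_unwind_esp(const uint8_t gdt_desc[8], uint32_t esp)
	{
		uint32_t base_hi = ((uint32_t)gdt_desc[7] << 8 | gdt_desc[4]) << 16;
		return base_hi + esp;
	}

	int main(void)
	{
		/* Illustrative descriptor whose base bits 16..31 are 0xffdf. */
		uint8_t desc[8] = { 0, 0, 0x00, 0x00, 0xdf, 0, 0, 0xff };

		printf("unwound esp = %#x\n", espfix_unwind_esp(desc, 0x1fdc));
		return 0;
	}
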
1019 .macro UNWIND_ESPFIX_STACK                       
1020         /* It's safe to clobber %eax, all other regs need to be preserved */
1021 #ifdef CONFIG_X86_ESPFIX32                       
1022         movl    %ss, %eax                        
1023         /* see if on espfix stack */             
1024         cmpw    $__ESPFIX_SS, %ax                
1025         jne     .Lno_fixup_\@                    
1026         /* switch to normal stack */             
1027         FIXUP_ESPFIX_STACK                       
1028 .Lno_fixup_\@:                                   
1029 #endif                                           
1030 .endm                                            
1031                                                  
1032 SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
1033         /* the function address is in %gs's slot on the stack */
1034         SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
1035         ENCODE_FRAME_POINTER                     
1036                                                  
1037         movl    PT_GS(%esp), %edi              # get the function address
1038                                                  
1039         /* fixup orig %eax */                    
1040         movl    PT_ORIG_EAX(%esp), %edx        # get the error code
1041         movl    $-1, PT_ORIG_EAX(%esp)         # no syscall to restart
1042
1043         movl    %esp, %eax                     # pt_regs pointer
1044         CALL_NOSPEC edi                          
1045                                                  
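
handle_exception dispatches to the C handler whose address was stashed in the %gs slot of the frame: %eax carries the pt_regs pointer and %edx the original error code, which matches the 32-bit kernel's -mregparm=3 register calling convention for the idtentry handlers. A self-contained sketch of that hand-off, with a plain C function call standing in for CALL_NOSPEC and fake_pt_regs as a hypothetical, stripped-down frame:

	#include <stdio.h>

	/* Only the field the dispatch code touches is modelled here. */
	struct fake_pt_regs {
		long orig_ax;	/* error code on entry, -1 afterwards */
	};

	static void exc_example(struct fake_pt_regs *regs, unsigned long error_code)
	{
		printf("handler: error_code=%lu orig_ax=%ld\n",
		       error_code, regs->orig_ax);
	}

	int main(void)
	{
		struct fake_pt_regs regs = { .orig_ax = 13 };	/* e.g. a #GP error code */
		void (*handler)(struct fake_pt_regs *, unsigned long) = exc_example;

		unsigned long error_code = regs.orig_ax;	/* movl PT_ORIG_EAX(%esp), %edx */
		regs.orig_ax = -1;				/* movl $-1, PT_ORIG_EAX(%esp) */
		handler(&regs, error_code);			/* CALL_NOSPEC edi */
		return 0;
	}
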
1046 handle_exception_return:                         
1047 #ifdef CONFIG_VM86                               
1048         movl    PT_EFLAGS(%esp), %eax          # mix EFLAGS and CS
1049         movb    PT_CS(%esp), %al                 
1050         andl    $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
1051 #else                                            
1052         /*                                       
1053          * We can be coming here from a child spawned by kernel_thread().
1054          */                                      
1055         movl    PT_CS(%esp), %eax                
1056         andl    $SEGMENT_RPL_MASK, %eax          
1057 #endif                                           
1058         cmpl    $USER_RPL, %eax                # returning to user-space?
1059         jnb     ret_to_user                      
1060                                                  
1061         PARANOID_EXIT_TO_KERNEL_MODE             
1062         BUG_IF_WRONG_CR3                         
1063         RESTORE_REGS 4                           
1064         jmp     .Lirq_return                     
1065                                                  
1066 ret_to_user:                                     
1067         movl    %esp, %eax                       
1068         jmp     restore_all_switch_stack         
1069 SYM_CODE_END(handle_exception)                   
1070                                                  
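
The CS/EFLAGS test above decides whether the interrupted context was user space: a VM86 frame counts as user regardless of the selector, otherwise the requested privilege level in the saved CS must be ring 3. The same test rendered as a hedged C sketch, modelled on the kernel's user_mode() helper (the constants follow the standard x86 definitions; the function name is made up):

	#include <stdint.h>
	#include <stdbool.h>

	#define SEGMENT_RPL_MASK 0x3u		/* low two selector bits */
	#define USER_RPL	 0x3u		/* ring 3 */
	#define X86_EFLAGS_VM	 (1u << 17)	/* virtual-8086 mode */

	static bool returning_to_user(uint32_t cs, uint32_t eflags)
	{
		/* Folding the VM bit in makes any vm86 frame compare as user. */
		return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_EFLAGS_VM)) >= USER_RPL;
	}

	int main(void)
	{
		/* A ring-3 code selector with ordinary flags counts as user. */
		return returning_to_user(0x73, 0x202) ? 0 : 1;
	}
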
1071 SYM_CODE_START(asm_exc_double_fault)             
1072 1:                                               
1073         /*                                       
1074          * This is a task gate handler, not an interrupt gate handler.
1075          * The error code is on the stack, but the stack is otherwise
1076          * empty.  Interrupts are off.  Our state on entry is mostly sane, with a few
1077          * exceptions:                           
1078          *                                       
1079          *  - CR0.TS is set.  "TS" literally means "task switched".
1080          *  - EFLAGS.NT is set because we're a "nested task".
1081          *  - The doublefault TSS has back_link set to the interrupted task's TSS.
1082          *  - TR points to the doublefault TSS, which is now marked busy.
1083          *  - CR3 is the normal kernel PGD.  This would be delightful, except
1084          *    that the CPU didn't bother to save the old CR3 anywhere.  This
1085          *    would make it very awkward to return to the context we came
1086          *    from.
1087          *                                       
1088          * The rest of EFLAGS is sanitized for us, so we don't need to
1089          * worry about AC or DF.                 
1090          *                                       
1091          * Don't even bother popping the error code.  It's always zero,
1092          * and ignoring it makes us a bit more robust against buggy
1093          * hypervisor task gate implementations.
1094          *                                       
1095          * We will manually undo the task switch instead of doing a
1096          * task-switching IRET.                  
1097          */                                      
1098                                                  
1099         clts                            /* clear CR0.TS */
1100         pushl   $X86_EFLAGS_FIXED                
1101         popfl                           /* clear EFLAGS.NT */
1102                                                  
1103         call    doublefault_shim                 
1104                                                  
1105         /* We don't support returning, so we have no IRET here. */
1106 1:                                               
1107         hlt                                      
1108         jmp 1b                                   
1109 SYM_CODE_END(asm_exc_double_fault)               
1110                                                  
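
A hardware task switch into the #DF TSS leaves CR0.TS and EFLAGS.NT set, and the CLTS / POPFL pair above clears them before any C code runs. The bit-level effect, as a small C sketch (the flag values are the architectural ones; the function name is made up for illustration):

	#include <stdint.h>

	#define X86_CR0_TS	 (1u << 3)	/* "task switched" */
	#define X86_EFLAGS_NT	 (1u << 14)	/* "nested task" */
	#define X86_EFLAGS_FIXED (1u << 1)	/* bit 1 architecturally reads as 1 */

	/* Roughly what "clts" plus "pushl $X86_EFLAGS_FIXED; popfl" amount to. */
	static void undo_task_gate_side_effects(uint32_t *cr0, uint32_t *eflags)
	{
		*cr0 &= ~X86_CR0_TS;		/* CLTS */
		*eflags = X86_EFLAGS_FIXED;	/* load a sanitized EFLAGS, NT cleared */
	}

	int main(void)
	{
		uint32_t cr0 = 0x80000011u | X86_CR0_TS;
		uint32_t eflags = 0x202u | X86_EFLAGS_NT;

		undo_task_gate_side_effects(&cr0, &eflags);
		return (cr0 & X86_CR0_TS) || (eflags & X86_EFLAGS_NT);
	}
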
1111 /*                                               
1112  * NMI is doubly nasty.  It can happen on the first instruction of
1113  * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
1114  * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
1115  * switched stacks.  We handle both conditions by simply checking whether we
1116  * interrupted kernel code running on the SYSENTER stack.
1117  */                                              
1118 SYM_CODE_START(asm_exc_nmi)                      
1119         ASM_CLAC                                 
1120                                                  
1121 #ifdef CONFIG_X86_ESPFIX32                       
1122         /*                                       
1123          * ESPFIX_SS is only ever set on the return to user path
1124          * after we've switched to the entry stack.
1125          */                                      
1126         pushl   %eax                             
1127         movl    %ss, %eax                        
1128         cmpw    $__ESPFIX_SS, %ax                
1129         popl    %eax                             
1130         je      .Lnmi_espfix_stack               
1131 #endif                                           
1132                                                  
1133         pushl   %eax                    # pt_regs->orig_ax
1134         SAVE_ALL_NMI cr3_reg=%edi                
1135         ENCODE_FRAME_POINTER                     
1136         xorl    %edx, %edx              # zero error code
1137         movl    %esp, %eax              # pt_regs pointer
1138                                                  
1139         /* Are we currently on the SYSENTER stack? */
1140         movl    PER_CPU_VAR(cpu_entry_area), %ecx
1141         addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
1142         subl    %eax, %ecx      /* ecx = (end of entry_stack) - esp */
1143         cmpl    $SIZEOF_entry_stack, %ecx        
1144         jb      .Lnmi_from_sysenter_stack        
1145                                                  
1146         /* Not on SYSENTER stack. */             
1147         call    exc_nmi                          
1148         jmp     .Lnmi_return                     
1149                                                  
1150 .Lnmi_from_sysenter_stack:                       
1151         /*                                       
1152          * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
1153          * is using the thread stack right now, so it's safe for us to use it.
1154          */                                      
1155         movl    %esp, %ebx                       
1156         movl    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esp
1157         call    exc_nmi                          
1158         movl    %ebx, %esp                       
1159                                                  
1160 .Lnmi_return:                                    
1161 #ifdef CONFIG_X86_ESPFIX32                       
1162         testl   $CS_FROM_ESPFIX, PT_CS(%esp)     
1163         jnz     .Lnmi_from_espfix                
1164 #endif                                           
1165                                                  
1166         CHECK_AND_APPLY_ESPFIX                   
1167         RESTORE_ALL_NMI cr3_reg=%edi pop=4       
1168         CLEAR_CPU_BUFFERS                        
1169         jmp     .Lirq_return                     
1170                                                  
1171 #ifdef CONFIG_X86_ESPFIX32                       
1172 .Lnmi_espfix_stack:                              
1173         /*                                       
1174          * Create the pointer for LSS to switch back to the original stack
1175          */                                      
1176         pushl   %ss                              
1177         pushl   %esp                             
1178         addl    $4, (%esp)                       
1179                                                  
1180         /* Copy the (short) IRET frame */        
1181         pushl   4*4(%esp)       # flags          
1182         pushl   4*4(%esp)       # cs             
1183         pushl   4*4(%esp)       # ip             
1184                                                  
1185         pushl   %eax            # orig_ax        
1186                                                  
1187         SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
1188         ENCODE_FRAME_POINTER                     
1189                                                  
1190         /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
1191         xorl    $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
1192                                                  
1193         xorl    %edx, %edx                       
1194         movl    %esp, %eax                       
1195         jmp     .Lnmi_from_sysenter_stack        
1196                                                  
1197 .Lnmi_from_espfix:                               
1198         RESTORE_ALL_NMI cr3_reg=%edi             
1199         /*                                       
1200          * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
1201          * fix up the gap and long frame:        
1202          *                                       
1203          *  3 - original frame  (exception)      
1204          *  2 - ESPFIX block    (above)          
1205          *  6 - gap             (FIXUP_FRAME)    
1206          *  5 - long frame      (FIXUP_FRAME)    
1207          *  1 - orig_ax                          
1208          */                                      
1209         lss     (1+5+6)*4(%esp), %esp          # back to the espfix stack
1210         CLEAR_CPU_BUFFERS                        
1211         jmp     .Lirq_return                     
1212 #endif                                           
1213 SYM_CODE_END(asm_exc_nmi)                        
1214                                                  
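
The NMI entry above decides whether it interrupted code on the per-CPU entry (SYSENTER) stack with a single unsigned comparison: subtract the interrupted ESP from the stack's end address and compare the distance against the stack size, which rejects pointers on either side of the stack in one test. A direct C rendering of that check (the addresses in main() are illustrative only):

	#include <stdint.h>
	#include <stdbool.h>
	#include <assert.h>

	/* True iff stack_end - esp lies in [0, stack_size). */
	static bool on_entry_stack(uint32_t esp, uint32_t stack_end,
				   uint32_t stack_size)
	{
		/*
		 * If esp is above stack_end the subtraction wraps to a huge
		 * unsigned value; if esp is at or below stack_end - stack_size
		 * the difference is >= stack_size.  Either way the compare fails.
		 */
		return (stack_end - esp) < stack_size;
	}

	int main(void)
	{
		uint32_t end = 0xffc01000u, size = 0x1000u;

		assert( on_entry_stack(end - 4,        end, size));
		assert(!on_entry_stack(end + 4,        end, size));
		assert(!on_entry_stack(end - size - 4, end, size));
		return 0;
	}
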
1215 .pushsection .text, "ax"                         
1216 SYM_CODE_START(rewind_stack_and_make_dead)       
1217         /* Prevent any naive code from trying to unwind to our caller. */
1218         xorl    %ebp, %ebp                       
1219                                                  
1220         movl    PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esi
1221         leal    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
1222                                                  
1223         call    make_task_dead                   
1224 1:      jmp 1b                                   
1225 SYM_CODE_END(rewind_stack_and_make_dead)         
1226 .popsection                                      
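
rewind_stack_and_make_dead abandons whatever (possibly corrupted) stack the task was on: it reloads ESP to just below the pt_regs area at the top of the task's kernel stack before calling make_task_dead(), so the exit path runs with a full, known-good stack. The offset arithmetic, sketched in C with illustrative values (PTREGS_SIZE follows the 17-slot register frame documented at the top of this file; the padding value is an assumption for a non-VM86 configuration):

	#include <stdint.h>
	#include <stdio.h>

	#define TOP_OF_KERNEL_STACK_PADDING 8u		/* assumed: x86_32 without VM86 */
	#define PTREGS_SIZE                 (17u * 4u)	/* 0x44 bytes of saved registers */

	/* Where the rewound ESP ends up, given the top of the kernel stack. */
	static uint32_t rewound_esp(uint32_t top_of_stack)
	{
		return top_of_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;
	}

	int main(void)
	{
		printf("new esp = %#x\n", rewound_esp(0xc1a02000u));
		return 0;
	}
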
                                                      
