TOMOYO Linux Cross Reference
Linux/arch/microblaze/kernel/entry.S (linux-6.12-rc7)

  1 /*
  2  * Low-level system-call handling, trap handlers and context switching
  3  *
  4  * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  5  * Copyright (C) 2008-2009 PetaLogix
  6  * Copyright (C) 2003           John Williams <jwilliams@itee.uq.edu.au>
  7  * Copyright (C) 2001,2002      NEC Corporation
  8  * Copyright (C) 2001,2002      Miles Bader <miles@gnu.org>
  9  *
 10  * This file is subject to the terms and conditions of the GNU General
 11  * Public License. See the file COPYING in the main directory of this
 12  * archive for more details.
 13  *
 14  * Written by Miles Bader <miles@gnu.org>
 15  * Heavily modified by John Williams for Microblaze
 16  */
 17                                                   
 18 #include <linux/sys.h>                            
 19 #include <linux/linkage.h>                        
 20                                                   
 21 #include <asm/entry.h>                            
 22 #include <asm/current.h>                          
 23 #include <asm/processor.h>                        
 24 #include <asm/exceptions.h>                       
 25 #include <asm/asm-offsets.h>                      
 26 #include <asm/thread_info.h>                      
 27                                                   
 28 #include <asm/page.h>                             
 29 #include <asm/unistd.h>                           
 30 #include <asm/xilinx_mb_manager.h>                
 31                                                   
 32 #include <linux/errno.h>                          
 33 #include <asm/signal.h>                           
 34 #include <asm/mmu.h>                              
 35                                                   
 36 #undef DEBUG                                      
 37                                                   
 38 #ifdef DEBUG                                      
 39 /* Create space for syscalls counting. */         
 40 .section .data                                    
 41 .global syscall_debug_table                       
 42 .align 4                                          
 43 syscall_debug_table:                              
 44         .space  (__NR_syscalls * 4)               
 45 #endif /* DEBUG */                                
 46                                                   
 47 #define C_ENTRY(name)   .globl name; .align 4; name
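/* Every C_ENTRY() use below therefore produces a globally visible, 4-byte
 * aligned label, so these entry points can be referenced from C code and
 * from the exception-vector setup. */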
 48                                                   
 49 /*
 50  * Various ways of setting and clearing BIP in flags reg
 51  * This is mucky, but necessary using microblaze version that
 52  * allows msr ops to write to BIP
 53  */
 54 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR       
 55         .macro  clear_bip                         
 56         msrclr  r0, MSR_BIP                       
 57         .endm                                     
 58                                                   
 59         .macro  set_bip                           
 60         msrset  r0, MSR_BIP                       
 61         .endm                                     
 62                                                   
 63         .macro  clear_eip                         
 64         msrclr  r0, MSR_EIP                       
 65         .endm                                     
 66                                                   
 67         .macro  set_ee                            
 68         msrset  r0, MSR_EE                        
 69         .endm                                     
 70                                                   
 71         .macro  disable_irq                       
 72         msrclr  r0, MSR_IE                        
 73         .endm                                     
 74                                                   
 75         .macro  enable_irq                        
 76         msrset  r0, MSR_IE                        
 77         .endm                                     
 78                                                   
 79         .macro  set_ums                           
 80         msrset  r0, MSR_UMS                       
 81         msrclr  r0, MSR_VMS                       
 82         .endm                                     
 83                                                   
 84         .macro  set_vms                           
 85         msrclr  r0, MSR_UMS                       
 86         msrset  r0, MSR_VMS                       
 87         .endm                                     
 88                                                   
 89         .macro  clear_ums                         
 90         msrclr  r0, MSR_UMS                       
 91         .endm                                     
 92                                                   
 93         .macro  clear_vms_ums                     
 94         msrclr  r0, MSR_VMS | MSR_UMS             
 95         .endm                                     
 96 #else                                             
 97         .macro  clear_bip                         
 98         mfs     r11, rmsr                         
 99         andi    r11, r11, ~MSR_BIP                
100         mts     rmsr, r11                         
101         .endm                                     
102                                                   
103         .macro  set_bip                           
104         mfs     r11, rmsr                         
105         ori     r11, r11, MSR_BIP                 
106         mts     rmsr, r11                         
107         .endm                                     
108                                                   
109         .macro  clear_eip                         
110         mfs     r11, rmsr                         
111         andi    r11, r11, ~MSR_EIP                
112         mts     rmsr, r11                         
113         .endm                                     
114                                                   
115         .macro  set_ee                            
116         mfs     r11, rmsr                         
117         ori     r11, r11, MSR_EE                  
118         mts     rmsr, r11                         
119         .endm                                     
120                                                   
121         .macro  disable_irq                       
122         mfs     r11, rmsr                         
123         andi    r11, r11, ~MSR_IE                 
124         mts     rmsr, r11                         
125         .endm                                     
126                                                   
127         .macro  enable_irq                        
128         mfs     r11, rmsr                         
129         ori     r11, r11, MSR_IE                  
130         mts     rmsr, r11                         
131         .endm                                     
132                                                   
133         .macro set_ums                            
134         mfs     r11, rmsr                         
135         ori     r11, r11, MSR_VMS                 
136         andni   r11, r11, MSR_UMS                 
137         mts     rmsr, r11                         
138         .endm                                     
139                                                   
140         .macro  set_vms                           
141         mfs     r11, rmsr                         
142         ori     r11, r11, MSR_VMS                 
143         andni   r11, r11, MSR_UMS                 
144         mts     rmsr, r11                         
145         .endm                                     
146                                                   
147         .macro  clear_ums                         
148         mfs     r11, rmsr                         
149         andni   r11, r11, MSR_UMS                 
150         mts     rmsr,r11                          
151         .endm                                     
152                                                   
153         .macro  clear_vms_ums                     
154         mfs     r11, rmsr                         
155         andni   r11, r11, (MSR_VMS|MSR_UMS)       
156         mts     rmsr,r11                          
157         .endm                                     
158 #endif                                            
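/* Note on the two variants above: with the optional msrset/msrclr
 * instructions an MSR bit is flipped in a single instruction; otherwise the
 * same effect needs a read-modify-write through r11 (mfs, then ori or
 * andi/andni, then mts), which is why r11 is clobbered in that case. */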
159                                                   
160 /* Define how to call high-level functions. With MMU, virtual mode must be
161  * enabled when calling the high-level function. Clobbers R11.
162  * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
163  */
164                                                   
165 /* turn on virtual protected mode save */         
166 #define VM_ON           \                         
167         set_ums;        \                         
168         rted    r0, 2f; \                         
169         nop; \                                    
170 2:                                                
171                                                   
172 /* turn off virtual protected mode save and user mode save */
173 #define VM_OFF                  \                 
174         clear_vms_ums;          \                 
175         rted    r0, TOPHYS(1f); \                 
176         nop; \                                    
177 1:                                                
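/* The rted in VM_ON/VM_OFF is what makes the switch take effect: on
 * MicroBlaze the UMS/VMS "save" bits written by set_ums/clear_vms_ums are
 * copied into the live UM/VM bits when an rted/rtbd/rtid executes, so the
 * new translation mode starts exactly at the branch target (2f or 1f). */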
178                                                   
179 #define SAVE_REGS \                               
180         swi     r2, r1, PT_R2;  /* Save SDA */    
181         swi     r3, r1, PT_R3;                    
182         swi     r4, r1, PT_R4;                    
183         swi     r5, r1, PT_R5;                    
184         swi     r6, r1, PT_R6;                    
185         swi     r7, r1, PT_R7;                    
186         swi     r8, r1, PT_R8;                    
187         swi     r9, r1, PT_R9;                    
188         swi     r10, r1, PT_R10;                  
189         swi     r11, r1, PT_R11;        /* sav    
190         swi     r12, r1, PT_R12;                  
191         swi     r13, r1, PT_R13;        /* Sav    
192         swi     r14, r1, PT_PC; /* PC, before     
193         swi     r15, r1, PT_R15;        /* Sav    
194         swi     r16, r1, PT_R16;                  
195         swi     r17, r1, PT_R17;                  
196         swi     r18, r1, PT_R18;        /* Sav    
197         swi     r19, r1, PT_R19;                  
198         swi     r20, r1, PT_R20;                  
199         swi     r21, r1, PT_R21;                  
200         swi     r22, r1, PT_R22;                  
201         swi     r23, r1, PT_R23;                  
202         swi     r24, r1, PT_R24;                  
203         swi     r25, r1, PT_R25;                  
204         swi     r26, r1, PT_R26;                  
205         swi     r27, r1, PT_R27;                  
206         swi     r28, r1, PT_R28;                  
207         swi     r29, r1, PT_R29;                  
208         swi     r30, r1, PT_R30;                  
209         swi     r31, r1, PT_R31;        /* Sav    
210         mfs     r11, rmsr;              /* sav    
211         swi     r11, r1, PT_MSR;                  
212                                                   
213 #define RESTORE_REGS_GP \                         
214         lwi     r2, r1, PT_R2;  /* restore SDA    
215         lwi     r3, r1, PT_R3;                    
216         lwi     r4, r1, PT_R4;                    
217         lwi     r5, r1, PT_R5;                    
218         lwi     r6, r1, PT_R6;                    
219         lwi     r7, r1, PT_R7;                    
220         lwi     r8, r1, PT_R8;                    
221         lwi     r9, r1, PT_R9;                    
222         lwi     r10, r1, PT_R10;                  
223         lwi     r11, r1, PT_R11;        /* res    
224         lwi     r12, r1, PT_R12;                  
225         lwi     r13, r1, PT_R13;        /* res    
226         lwi     r14, r1, PT_PC; /* RESTORE_LIN    
227         lwi     r15, r1, PT_R15;        /* res    
228         lwi     r16, r1, PT_R16;                  
229         lwi     r17, r1, PT_R17;                  
230         lwi     r18, r1, PT_R18;        /* res    
231         lwi     r19, r1, PT_R19;                  
232         lwi     r20, r1, PT_R20;                  
233         lwi     r21, r1, PT_R21;                  
234         lwi     r22, r1, PT_R22;                  
235         lwi     r23, r1, PT_R23;                  
236         lwi     r24, r1, PT_R24;                  
237         lwi     r25, r1, PT_R25;                  
238         lwi     r26, r1, PT_R26;                  
239         lwi     r27, r1, PT_R27;                  
240         lwi     r28, r1, PT_R28;                  
241         lwi     r29, r1, PT_R29;                  
242         lwi     r30, r1, PT_R30;                  
243         lwi     r31, r1, PT_R31;        /* Res    
244                                                   
245 #define RESTORE_REGS \                            
246         lwi     r11, r1, PT_MSR;                  
247         mts     rmsr , r11;                       
248         RESTORE_REGS_GP                           
249                                                   
250 #define RESTORE_REGS_RTBD \                       
251         lwi     r11, r1, PT_MSR;                  
252         andni   r11, r11, MSR_EIP;          /*    
253         ori     r11, r11, MSR_EE | MSR_BIP; /*    
254         mts     rmsr , r11;                       
255         RESTORE_REGS_GP                           
256                                                   
257 #define SAVE_STATE      \                         
258         swi     r1, r0, TOPHYS(PER_CPU(ENTRY_S    
259         /* See if already in kernel mode.*/       
260         mfs     r1, rmsr;                         
261         andi    r1, r1, MSR_UMS;                  
262         bnei    r1, 1f;                           
263         /* Kernel-mode state save.  */            
264         /* Reload kernel stack-ptr. */            
265         lwi     r1, r0, TOPHYS(PER_CPU(ENTRY_S    
266         /* FIXME: I can add these two lines to    
267         /* tophys(r1,r1); */                      
268         /* addik        r1, r1, -PT_SIZE; */      
269         addik   r1, r1, CONFIG_KERNEL_BASE_ADD    
270         SAVE_REGS                                 
271         brid    2f;                               
272         swi     r1, r1, PT_MODE;                  
273 1:      /* User-mode state save.  */              
274         lwi     r1, r0, TOPHYS(PER_CPU(CURRENT    
275         tophys(r1,r1);                            
276         lwi     r1, r1, TS_THREAD_INFO; /* get    
277         /* MS these three instructions can be     
278         /* addik        r1, r1, THREAD_SIZE; *    
279         /* tophys(r1,r1); */                      
280         /* addik        r1, r1, -PT_SIZE; */      
281         addik r1, r1, THREAD_SIZE + CONFIG_KER    
282         SAVE_REGS                                 
283         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_    
284         swi     r11, r1, PT_R1; /* Store user     
285         swi     r0, r1, PT_MODE; /* Was in use    
286         /* MS: I am clearing UMS even in case     
287         clear_ums;                                
288 2:      lwi     CURRENT_TASK, r0, TOPHYS(PER_C    
289                                                   
290 .text                                             
291                                                   
292 .extern cpuinfo                                   
293                                                   
294 C_ENTRY(mb_flush_dcache):                         
295         addik   r1, r1, -PT_SIZE                  
296         SAVE_REGS                                 
297                                                   
298         addik   r3, r0, cpuinfo                   
299         lwi     r7, r3, CI_DCS                    
300         lwi     r8, r3, CI_DCL                    
301         sub     r9, r7, r8                        
302 1:                                                
303         wdc.flush r9, r0                          
304         bgtid   r9, 1b                            
305         addk    r9, r9, r8                        
306                                                   
307         RESTORE_REGS                              
308         addik   r1, r1, PT_SIZE                   
309         rtsd    r15, 8                            
310         nop                                       
311                                                   
312 C_ENTRY(mb_invalidate_icache):                    
313         addik   r1, r1, -PT_SIZE                  
314         SAVE_REGS                                 
315                                                   
316         addik   r3, r0, cpuinfo                   
317         lwi     r7, r3, CI_ICS                    
318         lwi     r8, r3, CI_ICL                    
319         sub     r9, r7, r8                        
320 1:                                                
321         wic     r9, r0                            
322         bgtid   r9, 1b                            
323         addk    r9, r9, r8                        
324                                                   
325         RESTORE_REGS                              
326         addik   r1, r1, PT_SIZE                   
327         rtsd    r15, 8                            
328         nop                                       
329                                                   
330 /*                                                
331  * User trap.                                     
332  *                                                
333  * System calls are handled here.                 
334  *                                                
335  * Syscall protocol:                              
336  * Syscall number in r12, args in r5-r10          
337  * Return value in r3                             
338  *                                                
339  * Trap entered via brki instruction, so BIP bit is set, and interrupts
340  * are masked. This is nice, means we don't have to do anything special.
341  */                                               
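/*
 * Illustrative sketch only (not part of this file): a minimal user-space
 * sequence that enters _user_exception under the protocol above, assuming
 * the usual MicroBlaze system-call trap vector 0x08:
 *
 *      addik   r12, r0, __NR_getpid    syscall number goes in r12
 *      brki    r14, 0x08               trap; brki saves the PC in r14
 *
 * On return, r3 holds the result or a negative errno; arguments, when
 * needed, are loaded into r5-r10 before the brki.
 */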
342 C_ENTRY(_user_exception):                         
343         swi     r1, r0, TOPHYS(PER_CPU(ENTRY_S    
344         addi    r14, r14, 4     /* return addr    
345                                                   
346         lwi     r1, r0, TOPHYS(PER_CPU(CURRENT    
347         tophys(r1,r1);                            
348         lwi     r1, r1, TS_THREAD_INFO; /* get    
349 /* calculate kernel stack pointer from task st    
350         addik   r1, r1, THREAD_SIZE;              
351         tophys(r1,r1);                            
352                                                   
353         addik   r1, r1, -PT_SIZE; /* Make room    
354         SAVE_REGS                                 
355         swi     r0, r1, PT_R3                     
356         swi     r0, r1, PT_R4                     
357                                                   
358         swi     r0, r1, PT_MODE;                  
359         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_    
360         swi     r11, r1, PT_R1;         /* Sto    
361         clear_ums;                                
362 2:      lwi     CURRENT_TASK, r0, TOPHYS(PER_C    
363         /* Save away the syscall number.  */      
364         swi     r12, r1, PT_R0;                   
365         tovirt(r1,r1)                             
366                                                   
367 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
368 /* Jump to the appropriate function for the sy    
369  * (r12 is not preserved), or return an error     
370  * register should point to the location where    
371  * the called function should return.  [note t    
372                                                   
373         /* Step into virtual mode */              
374         rtbd    r0, 3f                            
375         nop                                       
376 3:                                                
377         lwi     r11, CURRENT_TASK, TS_THREAD_I    
378         lwi     r11, r11, TI_FLAGS       /* ge    
379         andi    r11, r11, _TIF_WORK_SYSCALL_MASK
380         beqi    r11, 4f                           
381                                                   
382         addik   r3, r0, -ENOSYS                   
383         swi     r3, r1, PT_R3                     
384         brlid   r15, do_syscall_trace_enter       
385         addik   r5, r1, PT_R0                     
386                                                   
387         # do_syscall_trace_enter returns the new syscall number
388         addk    r12, r0, r3                       
389         lwi     r5, r1, PT_R5;                    
390         lwi     r6, r1, PT_R6;                    
391         lwi     r7, r1, PT_R7;                    
392         lwi     r8, r1, PT_R8;                    
393         lwi     r9, r1, PT_R9;                    
394         lwi     r10, r1, PT_R10;                  
395 4:                                                
396 /* Jump to the appropriate function for the sy    
397  * (r12 is not preserved), or return an error     
398  * The LP register should point to the locatio    
399  * should return.  [note that MAKE_SYS_CALL us    
400         /* See if the system call number is valid */
401         blti    r12, 5f                           
402         addi    r11, r12, -__NR_syscalls;         
403         bgei    r11, 5f;                          
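        /* blti rejects negative syscall numbers and bgei rejects numbers
         * >= __NR_syscalls, so only 0 <= r12 < __NR_syscalls reaches the
         * table lookup below. */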
404         /* Figure out which function to use for this syscall */
405         /* Note Microblaze barrel shift is optional */
406         add     r12, r12, r12;                    
407         add     r12, r12, r12;                    
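        /* The two adds above compute r12 * 4, the byte offset of a 4-byte
         * sys_call_table entry, without relying on the optional barrel
         * shifter. */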
408         addi    r30, r0, 1                        
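        /* r30 = 1 marks "returning from a syscall"; ret_from_trap passes it
         * to do_notify_resume as its second argument and clears it once the
         * signal work is done. */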
409                                                   
410 #ifdef DEBUG                                      
411         /* Trace syscalls and store them to syscall_debug_table */
412         /* The first syscall location stores the total syscall count */
413         lwi     r3, r0, syscall_debug_table       
414         addi    r3, r3, 1                         
415         swi     r3, r0, syscall_debug_table       
416         lwi     r3, r12, syscall_debug_table      
417         addi    r3, r3, 1                         
418         swi     r3, r12, syscall_debug_table      
419 #endif                                            
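        /* In the DEBUG build above, word 0 of syscall_debug_table holds the
         * total number of syscalls taken, and the word at offset r12 (the
         * syscall number already scaled by 4) counts that syscall alone. */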
420                                                   
421         # Find and jump into the syscall handler
422         lwi     r12, r12, sys_call_table          
423         /* where the trap should return need -8 to adjust for rtsd r15, 8 */
424         addi    r15, r0, ret_from_trap-8          
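        /* MicroBlaze C functions return with "rtsd r15, 8" (link register
         * plus 8), so loading ret_from_trap - 8 into r15 makes the handler's
         * return land exactly on ret_from_trap. */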
425         bra     r12                               
426                                                   
427         /* The syscall number is invalid, return an error */
428 5:                                                
429         braid   ret_from_trap                     
430         addi    r3, r0, -ENOSYS;                  
431                                                   
432 /* Entry point used to return from a syscall/trap */
433 /* We re-enable BIP bit before state restore */
434 C_ENTRY(ret_from_trap):                           
435         swi     r3, r1, PT_R3                     
436         swi     r4, r1, PT_R4                     
437                                                   
438         lwi     r11, r1, PT_MODE;                 
439 /* See if returning to kernel mode, if so, ski    
440         bnei    r11, 2f;                          
441         /* We're returning to user mode, so check for various conditions that
442          * trigger rescheduling. */
443         /* FIXME: Restructure all these flag c    
444         lwi     r11, CURRENT_TASK, TS_THREAD_I    
445         lwi     r11, r11, TI_FLAGS;               
446         andi    r11, r11, _TIF_WORK_SYSCALL_MASK
447         beqi    r11, 1f                           
448                                                   
449         brlid   r15, do_syscall_trace_leave       
450         addik   r5, r1, PT_R0                     
451 1:                                                
452         /* We're returning to user mode, so check for various conditions that
453          * trigger rescheduling. */
454         /* get thread info from current task */
455         lwi     r11, CURRENT_TASK, TS_THREAD_I    
456         lwi     r19, r11, TI_FLAGS;               
457         andi    r11, r19, _TIF_NEED_RESCHED;      
458         beqi    r11, 5f;                          
459                                                   
460         bralid  r15, schedule;  /* Call schedu    
461         nop;                            /* del    
462         bri     1b                                
463                                                   
464         /* Maybe handle a signal */               
465 5:                                                
466         andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
467         beqi    r11, 4f;                /* Sig    
468                                                   
469         addik   r5, r1, 0;              /* Arg    
470         bralid  r15, do_notify_resume;  /* Han    
471         add     r6, r30, r0;            /* Arg    
472         add     r30, r0, r0             /* no     
473         bri     1b                                
474                                                   
475 /* Finally, return to user state.  */             
476 4:      set_bip;                        /*  In    
477         swi     CURRENT_TASK, r0, PER_CPU(CURR    
478         VM_OFF;                                   
479         tophys(r1,r1);                            
480         RESTORE_REGS_RTBD;                        
481         addik   r1, r1, PT_SIZE         /* Cle    
482         lwi     r1, r1, PT_R1 - PT_SIZE;/* Res    
483         bri     6f;                               
484                                                   
485 /* Return to kernel state.  */                    
486 2:      set_bip;                        /*  In    
487         VM_OFF;                                   
488         tophys(r1,r1);                            
489         RESTORE_REGS_RTBD;                        
490         addik   r1, r1, PT_SIZE         /* Cle    
491         tovirt(r1,r1);                            
492 6:                                                
493 TRAP_return:            /* Make global symbol for debugging */
494         rtbd    r14, 0; /* Instructions to ret    
495         nop;                                      
496                                                   
497                                                   
498 /* This is the initial entry point for a new child thread, with an appropriate
499    stack in place that makes it look like the child is in the middle of a
500    syscall.  This function is actually `returned to' from switch_thread
501    (copy_thread makes ret_from_fork the return address in each new thread's
502    saved context).  */
503 C_ENTRY(ret_from_fork):                           
504         bralid  r15, schedule_tail; /* ...whic    
505         add     r5, r3, r0;     /* switch_thre    
506                                 /* ( in the de    
507         brid    ret_from_trap;  /* Do normal t    
508         add     r3, r0, r0;     /* Child's for    
509                                                   
510 C_ENTRY(ret_from_kernel_thread):                  
511         bralid  r15, schedule_tail; /* ...whic    
512         add     r5, r3, r0;     /* switch_thre    
513                                 /* ( in the de    
514         brald   r15, r20        /* fn was left    
515         addk    r5, r0, r19     /* ... and arg    
516         brid    ret_from_trap                     
517         add     r3, r0, r0                        
518                                                   
519 C_ENTRY(sys_rt_sigreturn_wrapper):                
520         addik   r30, r0, 0              /* no     
521         brid    sys_rt_sigreturn        /* Do     
522         addik   r5, r1, 0;              /* add    
523                                                   
524 /*                                                
525  * HW EXCEPTION routine start
526  */                                               
527 C_ENTRY(full_exception_trap):                     
528         /* adjust exception address for privileged instruction
529          * for finding where is it */             
530         addik   r17, r17, -4                      
531         SAVE_STATE /* Save registers */           
532         /* PC, before IRQ/trap - this is one i    
533         swi     r17, r1, PT_PC;                   
534         tovirt(r1,r1)                             
535         /* FIXME this can be store directly in    
536          * I tested it but there is a fault */    
537         /* where the trap should return need -8 to adjust for rtsd r15, 8 */
538         addik   r15, r0, ret_from_exc - 8         
539         mfs     r6, resr                          
540         mfs     r7, rfsr;               /* sav    
541         mts     rfsr, r0;       /* Clear stick    
542         rted    r0, full_exception                
543         addik   r5, r1, 0                /* pa    
544                                                   
545 /*                                                
546  * Unaligned data trap.                           
547  *                                                
548  * Unaligned data trap last on 4k page is handled here.
549  *
550  * Trap entered via exception, so EE bit is set, and interrupts
551  * are masked.  This is nice, means we don't have to do anything special.
552  *
553  * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
554  */                                               
555 C_ENTRY(unaligned_data_trap):                     
556         /* MS: I have to save r11 value and th    
557          * set_bit, clear_eip, set_ee use r11     
558          * instructions are not used. We don't    
559          * are used and they use r0 instead of    
560          * I am using ENTRY_SP which should be    
561          * pointer saving. */                     
562         swi     r11, r0, TOPHYS(PER_CPU(ENTRY_    
563         set_bip;        /* equalize initial st    
564         clear_eip;                                
565         set_ee;                                   
566         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_    
567         SAVE_STATE              /* Save regist    
568         /* PC, before IRQ/trap - this is one i    
569         swi     r17, r1, PT_PC;                   
570         tovirt(r1,r1)                             
571         /* where the trap should return need -8 to adjust for rtsd r15, 8 */
572         addik   r15, r0, ret_from_exc-8           
573         mfs     r3, resr                /* ESR    
574         mfs     r4, rear                /* EAR    
575         rtbd    r0, _unaligned_data_exception     
576         addik   r7, r1, 0               /* par    
577                                                   
578 /*                                                
579  * Page fault traps.                              
580  *                                                
581  * If the real exception handler (from hw_exce    
582  * the mapping for the process, then we're thr    
583  *                                                
584  * Trap entered via exceptions, so EE bit is s    
585  * are masked.  This is nice, means we don't h    
586  *                                                
587  * Build a standard exception frame for TLB Ac    
588  * will bail out to this point if they can't r    
589  *                                                
590  * The C function called is in "arch/microblaze/mm/fault.c":
591  * void do_page_fault(struct pt_regs *regs,
592  *                              unsigned long address,
593  *                              unsigned long error_code)
594  */                                               
595 /* data and instruction trap - which is chosen
596 C_ENTRY(page_fault_data_trap):                    
597         SAVE_STATE              /* Save regist    
598         /* PC, before IRQ/trap - this is one i    
599         swi     r17, r1, PT_PC;                   
600         tovirt(r1,r1)                             
601         /* where the trap should return need -8 to adjust for rtsd r15, 8 */
602         addik   r15, r0, ret_from_exc-8           
603         mfs     r6, rear                /* par    
604         mfs     r7, resr                /* par    
605         rted    r0, do_page_fault                 
606         addik   r5, r1, 0               /* par    
607                                                   
608 C_ENTRY(page_fault_instr_trap):                   
609         SAVE_STATE              /* Save regist    
610         /* PC, before IRQ/trap - this is one i    
611         swi     r17, r1, PT_PC;                   
612         tovirt(r1,r1)                             
613         /* where the trap should return need -8 to adjust for rtsd r15, 8 */
614         addik   r15, r0, ret_from_exc-8           
615         mfs     r6, rear                /* par    
616         ori     r7, r0, 0               /* par    
617         rted    r0, do_page_fault                 
618         addik   r5, r1, 0               /* par    
619                                                   
620 /* Entry point used to return from an exceptio    
621 C_ENTRY(ret_from_exc):                            
622         lwi     r11, r1, PT_MODE;                 
623         bnei    r11, 2f;                /* See    
624                                         /* ...    
625                                                   
626         /* We're returning to user mode, so check for various conditions that
627            trigger rescheduling. */
628 1:                                                
629         lwi     r11, CURRENT_TASK, TS_THREAD_I    
630         lwi     r19, r11, TI_FLAGS;     /* get    
631         andi    r11, r19, _TIF_NEED_RESCHED;      
632         beqi    r11, 5f;                          
633                                                   
634 /* Call the scheduler before returning from a     
635         bralid  r15, schedule;  /* Call schedu    
636         nop;                            /* del    
637         bri     1b                                
638                                                   
639         /* Maybe handle a signal */               
640 5:      andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
641         beqi    r11, 4f;                /* Sig    
642                                                   
643         /*                                        
644          * Handle a signal return; Pending sig    
645          *                                        
646          * Not all registers are saved by the     
647          * points (for instance, call-saved re    
648          * C-compiler calling sequence in the     
649          * preserved), and call-clobbered regi    
650          * traps), but signal handlers may wan    
651          * complete register state.  Here we s    
652          * the normal entry sequence, so that     
653          * (in a possibly modified form) after    
654         addik   r5, r1, 0;              /* Arg    
655         bralid  r15, do_notify_resume;  /* Han    
656         addi    r6, r0, 0;              /* Arg    
657         bri     1b                                
658                                                   
659 /* Finally, return to user state.  */             
660 4:      set_bip;                        /* Int    
661         swi     CURRENT_TASK, r0, PER_CPU(CURR    
662         VM_OFF;                                   
663         tophys(r1,r1);                            
664                                                   
665         RESTORE_REGS_RTBD;                        
666         addik   r1, r1, PT_SIZE         /* Cle    
667                                                   
668         lwi     r1, r1, PT_R1 - PT_SIZE; /* Re    
669         bri     6f;                               
670 /* Return to kernel state.  */                    
671 2:      set_bip;                        /* Int    
672         VM_OFF;                                   
673         tophys(r1,r1);                            
674         RESTORE_REGS_RTBD;                        
675         addik   r1, r1, PT_SIZE         /* Cle    
676                                                   
677         tovirt(r1,r1);                            
678 6:                                                
679 EXC_return:             /* Make global symbol for debugging */
680         rtbd    r14, 0; /* Instructions to ret    
681         nop;                                      
682                                                   
683 /*                                                
684  * HW EXCEPTION routine end
685  */                                               
686                                                   
687 /*                                                
688  * Hardware maskable interrupts.                  
689  *                                                
690  * The stack-pointer (r1) should have already been saved to the memory
691  * location PER_CPU(ENTRY_SP).                    
692  */                                               
693 C_ENTRY(_interrupt):                              
694 /* MS: we are in physical address */              
695 /* Save registers, switch to proper stack, con    
696         swi     r1, r0, TOPHYS(PER_CPU(ENTRY_S    
697         /* MS: See if already in kernel mode.     
698         mfs     r1, rmsr                          
699         nop                                       
700         andi    r1, r1, MSR_UMS                   
701         bnei    r1, 1f                            
702                                                   
703 /* Kernel-mode state save. */                     
704         lwi     r1, r0, TOPHYS(PER_CPU(ENTRY_S    
705         tophys(r1,r1); /* MS: I have in r1 phy    
706         /* save registers */                      
707 /* MS: Make room on the stack -> activation re    
708         addik   r1, r1, -PT_SIZE;                 
709         SAVE_REGS                                 
710         brid    2f;                               
711         swi     r1, r1, PT_MODE; /* 0 - user m    
712 1:                                                
713 /* User-mode state save. */                       
714  /* MS: get the saved current */                  
715         lwi     r1, r0, TOPHYS(PER_CPU(CURRENT    
716         tophys(r1,r1);                            
717         lwi     r1, r1, TS_THREAD_INFO;           
718         addik   r1, r1, THREAD_SIZE;              
719         tophys(r1,r1);                            
720         /* save registers */                      
721         addik   r1, r1, -PT_SIZE;                 
722         SAVE_REGS                                 
723         /* calculate mode */                      
724         swi     r0, r1, PT_MODE;                  
725         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_    
726         swi     r11, r1, PT_R1;                   
727         clear_ums;                                
728 2:                                                
729         lwi     CURRENT_TASK, r0, TOPHYS(PER_C    
730         tovirt(r1,r1)                             
731         addik   r15, r0, irq_call;                
732 irq_call:rtbd   r0, do_IRQ;                       
733         addik   r5, r1, 0;                        
734                                                   
735 /* MS: we are in virtual mode */                  
736 ret_from_irq:                                     
737         lwi     r11, r1, PT_MODE;                 
738         bnei    r11, 2f;                          
739                                                   
740 1:                                                
741         lwi     r11, CURRENT_TASK, TS_THREAD_I    
742         lwi     r19, r11, TI_FLAGS; /* MS: get    
743         andi    r11, r19, _TIF_NEED_RESCHED;      
744         beqi    r11, 5f                           
745         bralid  r15, schedule;                    
746         nop; /* delay slot */                     
747         bri     1b                                
748                                                   
749     /* Maybe handle a signal */                   
750 5:      andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
751         beqid   r11, no_intr_resched              
752 /* Handle a signal return; Pending signals sho    
753         addik   r5, r1, 0; /* Arg 1: struct pt    
754         bralid  r15, do_notify_resume;  /* Han    
755         addi    r6, r0, 0; /* Arg 2: int in_sy    
756         bri     1b                                
757                                                   
758 /* Finally, return to user state. */              
759 no_intr_resched:                                  
760     /* Disable interrupts, we are now committe    
761         disable_irq                               
762         swi     CURRENT_TASK, r0, PER_CPU(CURR    
763         VM_OFF;                                   
764         tophys(r1,r1);                            
765         RESTORE_REGS                              
766         addik   r1, r1, PT_SIZE /* MS: Clean u    
767         lwi     r1, r1, PT_R1 - PT_SIZE;          
768         bri     6f;                               
769 /* MS: Return to kernel state. */                 
770 2:                                                
771 #ifdef CONFIG_PREEMPTION                          
772         lwi     r11, CURRENT_TASK, TS_THREAD_I    
773         /* MS: get preempt_count from thread i    
774         lwi     r5, r11, TI_PREEMPT_COUNT;        
775         bgti    r5, restore;                      
776                                                   
777         lwi     r5, r11, TI_FLAGS;                
778         andi    r5, r5, _TIF_NEED_RESCHED;        
779         beqi    r5, restore /* if zero jump ov    
780                                                   
781         /* interrupts are off that's why I am     
782         bralid  r15, preempt_schedule_irq         
783         nop                                       
784 restore:                                          
785 #endif                                            
786         VM_OFF /* MS: turn off MMU */             
787         tophys(r1,r1)                             
788         RESTORE_REGS                              
789         addik   r1, r1, PT_SIZE /* MS: Clean u    
790         tovirt(r1,r1);                            
791 6:                                                
792 IRQ_return: /* MS: Make global symbol for debugging */
793         rtid    r14, 0                            
794         nop                                       
795                                                   
796 #ifdef CONFIG_MB_MANAGER                          
797                                                   
798 #define PT_PID          PT_SIZE                   
799 #define PT_TLBI         PT_SIZE + 4               
800 #define PT_ZPR          PT_SIZE + 8               
801 #define PT_TLBL0        PT_SIZE + 12              
802 #define PT_TLBH0        PT_SIZE + 16              
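/* These offsets address an extra save area that _xmb_manager_break places
 * just above the normal pt_regs frame (the "-PT_SIZE - 36" stack adjustment
 * below), used to keep PID, the TLB index, ZPR and the TLB entries across a
 * recovery reset. */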
803                                                   
804 C_ENTRY(_xtmr_manager_reset):                     
805         lwi     r1, r0, xmb_manager_stackpoint    
806                                                   
807         /* Restore MSR */                         
808         lwi     r2, r1, PT_MSR                    
809         mts     rmsr, r2                          
810         bri     4                                 
811                                                   
812         /* restore Special purpose registers *    
813         lwi     r2, r1, PT_PID                    
814         mts     rpid, r2                          
815                                                   
816         lwi     r2, r1, PT_TLBI                   
817         mts     rtlbx, r2                         
818                                                   
819         lwi     r2, r1, PT_ZPR                    
820         mts     rzpr, r2                          
821                                                   
822 #if CONFIG_XILINX_MICROBLAZE0_USE_FPU             
823         lwi     r2, r1, PT_FSR                    
824         mts     rfsr, r2                          
825 #endif                                            
826                                                   
827         /* restore all the tlb's */               
828         addik   r3, r0, TOPHYS(tlb_skip)          
829         addik   r6, r0, PT_TLBL0                  
830         addik   r7, r0, PT_TLBH0                  
831 restore_tlb:                                      
832         add     r6, r6, r1                        
833         add     r7, r7, r1                        
834         lwi     r2, r6, 0                         
835         mts     rtlblo, r2                        
836         lwi     r2, r7, 0                         
837         mts     rtlbhi, r2                        
838         addik   r6, r6, 4                         
839         addik   r7, r7, 4                         
840         bgtid   r3, restore_tlb                   
841         addik   r3, r3, -1                        
842                                                   
843         lwi     r5, r0, TOPHYS(xmb_manager_dev    
844         lwi     r8, r0, TOPHYS(xmb_manager_res    
845         set_vms                                   
846         /* return from reset need -8 to adjust    
847         addik   r15, r0, ret_from_reset - 8       
848         rtbd    r8, 0                             
849         nop                                       
850                                                   
851 ret_from_reset:                                   
852         set_bip /* Ints masked for state resto    
853         VM_OFF                                    
854         /* MS: Restore all regs */                
855         RESTORE_REGS                              
856         lwi     r14, r1, PT_R14                   
857         lwi     r16, r1, PT_PC                    
858         addik   r1, r1, PT_SIZE + 36              
859         rtbd    r16, 0                            
860         nop                                       
861                                                   
862 /*                                                
863  * Break handler for MB Manager. Enter to _xmb_manager_break by
864  * injecting fault in one of the TMR Microblaze cores.
865  * FIXME: This break handler supports getting     
866  * called from kernel space only.                 
867  */                                               
868 C_ENTRY(_xmb_manager_break):                      
869         /*                                        
870          * Reserve memory in the stack for con    
871          * (which includes memory for storing     
872          */                                       
873         addik   r1, r1, -PT_SIZE - 36             
874         swi     r1, r0, xmb_manager_stackpoint    
875         SAVE_REGS                                 
876         swi     r14, r1, PT_R14 /* rewrite sav    
877         swi     r16, r1, PT_PC; /* PC and r16     
878                                                   
879         lwi     r6, r0, TOPHYS(xmb_manager_bas    
880         lwi     r7, r0, TOPHYS(xmb_manager_crv    
881         /*                                        
882          * When the break vector gets asserted    
883          * the break signal must be blocked be    
884          * break handler, below code configure    
885          * control register to block break sig    
886          */                                       
887         swi     r7, r6, 0                         
888                                                   
889         /* Save the special purpose registers     
890         mfs     r2, rpid                          
891         swi     r2, r1, PT_PID                    
892                                                   
893         mfs     r2, rtlbx                         
894         swi     r2, r1, PT_TLBI                   
895                                                   
896         mfs     r2, rzpr                          
897         swi     r2, r1, PT_ZPR                    
898                                                   
899 #if CONFIG_XILINX_MICROBLAZE0_USE_FPU             
900         mfs     r2, rfsr                          
901         swi     r2, r1, PT_FSR                    
902 #endif                                            
903         mfs     r2, rmsr                          
904         swi     r2, r1, PT_MSR                    
905                                                   
906         /* Save all the tlb's */                  
907         addik   r3, r0, TOPHYS(tlb_skip)          
908         addik   r6, r0, PT_TLBL0                  
909         addik   r7, r0, PT_TLBH0                  
910 save_tlb:                                         
911         add     r6, r6, r1                        
912         add     r7, r7, r1                        
913         mfs     r2, rtlblo                        
914         swi     r2, r6, 0                         
915         mfs     r2, rtlbhi                        
916         swi     r2, r7, 0                         
917         addik   r6, r6, 4                         
918         addik   r7, r7, 4                         
919         bgtid   r3, save_tlb                      
920         addik   r3, r3, -1                        
921                                                   
922         lwi     r5, r0, TOPHYS(xmb_manager_dev    
923         lwi     r8, r0, TOPHYS(xmb_manager_cal    
924         /* return from break need -8 to adjust    
925         addik   r15, r0, ret_from_break - 8       
926         rtbd    r8, 0                             
927         nop                                       
928                                                   
929 ret_from_break:                                   
930         /* flush the d-cache */                   
931         bralid  r15, mb_flush_dcache              
932         nop                                       
933                                                   
934         /*                                        
935          * To make sure microblaze i-cache is     
936          * invalidate the i-cache.                
937          */                                       
938         bralid  r15, mb_invalidate_icache         
939         nop                                       
940                                                   
941         set_bip; /* Ints masked for state rest    
942         VM_OFF;                                   
943         mbar    1                                 
944         mbar    2                                 
945         bri     4                                 
946         suspend                                   
947         nop                                       
948 #endif                                            
949                                                   
950 /*                                                
951  * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
952  * and call handling function with saved pt_regs
953  */                                               
954 C_ENTRY(_debug_exception):                        
955         /* BIP bit is set on entry, no interru    
956         swi     r1, r0, TOPHYS(PER_CPU(ENTRY_S    
957                                                   
958         mfs     r1, rmsr                          
959         nop                                       
960         andi    r1, r1, MSR_UMS                   
961         bnei    r1, 1f                            
962 /* MS: Kernel-mode state save - kgdb */           
963         lwi     r1, r0, TOPHYS(PER_CPU(ENTRY_S    
964                                                   
965         /* BIP bit is set on entry, no interru    
966         addik   r1, r1, CONFIG_KERNEL_BASE_ADD    
967         SAVE_REGS;                                
968         /* save all regs to pt_reg structure *    
969         swi     r0, r1, PT_R0;  /* R0 must be     
970         swi     r14, r1, PT_R14 /* rewrite sav    
971         swi     r16, r1, PT_PC; /* PC and r16     
972         /* save special purpose registers to p    
973         mfs     r11, rear;                        
974         swi     r11, r1, PT_EAR;                  
975         mfs     r11, resr;                        
976         swi     r11, r1, PT_ESR;                  
977         mfs     r11, rfsr;                        
978         swi     r11, r1, PT_FSR;                  
979                                                   
980         /* stack pointer is in physical addres    
981          * by PT_SIZE but we need to get corre    
982         addik   r11, r1, CONFIG_KERNEL_START -    
983         swi     r11, r1, PT_R1                    
984         /* MS: r31 - current pointer isn't cha    
985         tovirt(r1,r1)                             
986 #ifdef CONFIG_KGDB                                
987         addi    r5, r1, 0 /* pass pt_reg addre    
988         addik   r15, r0, dbtrap_call; /* retur    
989         rtbd    r0, microblaze_kgdb_break         
990         nop;                                      
991 #endif                                            
992         /* MS: Place handler for brki from ker    
993          * It is very unlikely that another br    
994         bri 0                                     
995                                                   
996 /* MS: User-mode state save - gdb */              
997 1:      lwi     r1, r0, TOPHYS(PER_CPU(CURRENT    
998         tophys(r1,r1);                            
999         lwi     r1, r1, TS_THREAD_INFO; /* get    
1000         addik   r1, r1, THREAD_SIZE;    /* ca    
1001         tophys(r1,r1);                           
1002                                                  
1003         addik   r1, r1, -PT_SIZE; /* Make roo    
1004         SAVE_REGS;                               
1005         swi     r16, r1, PT_PC; /* Save LP */    
1006         swi     r0, r1, PT_MODE; /* Was in user mode. */
1007         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
1008         swi     r11, r1, PT_R1; /* Store user SP. */
1009         lwi     CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
1010         tovirt(r1,r1)                            
1011         set_vms;                                 
1012         addik   r5, r1, 0;                       
1013         addik   r15, r0, dbtrap_call;            
1014 dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
1015         rtbd    r0, sw_exception                 
1016         nop                                      
1017                                                  
1018         /* MS: The first instruction for the second part of the gdb/kgdb handling */
1019         set_bip; /* Ints masked for state restore */
1020         lwi     r11, r1, PT_MODE;                
1021         bnei    r11, 2f;                         
1022 /* MS: Return to user space - gdb */             
1023 1:                                               
1024         /* Get current task ptr into r11 */      
1025         lwi     r11, CURRENT_TASK, TS_THREAD_INFO;   /* get thread info */
1026         lwi     r19, r11, TI_FLAGS;     /* get flags in thread info */
1027         andi    r11, r19, _TIF_NEED_RESCHED;     
1028         beqi    r11, 5f;                         
1029                                                  
1030         /* Call the scheduler before returning from a syscall/trap. */
1031         bralid  r15, schedule;  /* Call scheduler */
1032         nop;                            /* delay slot */
1033         bri     1b                               
1034                                                  
1035         /* Maybe handle a signal */              
1036 5:      andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
1037         beqi    r11, 4f;                /* Signals to handle? If none, return to user */
1038                                                  
1039         addik   r5, r1, 0;              /* Arg 1: struct pt_regs *regs */
1040         bralid  r15, do_notify_resume;  /* Handle any signals */
1041         addi  r6, r0, 0;        /* Arg 2: int in_syscall */
1042         bri     1b                               
1043                                                  
1044 /* Finally, return to user state.  */            
1045 4:      swi     CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
1046         VM_OFF;                                  
1047         tophys(r1,r1);                           
1048         /* MS: Restore all regs */               
1049         RESTORE_REGS_RTBD                        
1050         addik   r1, r1, PT_SIZE  /* Clean up stack space */
1051         lwi     r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
1052 DBTRAP_return_user: /* MS: Make global symbol for debugging */
1053         rtbd    r16, 0; /* MS: Instructions to return from a debug trap */
1054         nop;                                     
1055                                                  
1056 /* MS: Return to kernel state - kgdb */          
1057 2:      VM_OFF;                                  
1058         tophys(r1,r1);                           
1059         /* MS: Restore all regs */               
1060         RESTORE_REGS_RTBD                        
1061         lwi     r14, r1, PT_R14;                 
1062         lwi     r16, r1, PT_PC;                  
1063         addik   r1, r1, PT_SIZE; /* MS: Clean up stack space */
1064         tovirt(r1,r1);                           
1065 DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
1066         rtbd    r16, 0; /* MS: Instructions to return from a debug trap */
1067         nop;                                     
1068                                                  
1069                                                  
1070 ENTRY(_switch_to)                                
1071         /* prepare return value */               
1072         addk    r3, r0, CURRENT_TASK             
1073                                                  
1074         /* save registers in cpu_context */      
1075         /* use r11 and r12, volatile registers, as temporaries */
1076         /* get the start of cpu_context for the previous process */
1077         addik   r11, r5, TI_CPU_CONTEXT          
1078         swi     r1, r11, CC_R1                   
1079         swi     r2, r11, CC_R2                   
1080         /* skip volatile registers.              
1081          * they are saved on the stack when we jumped into _switch_to() */
1082         /* dedicated registers */                
1083         swi     r13, r11, CC_R13                 
1084         swi     r14, r11, CC_R14                 
1085         swi     r15, r11, CC_R15                 
1086         swi     r16, r11, CC_R16                 
1087         swi     r17, r11, CC_R17                 
1088         swi     r18, r11, CC_R18                 
1089         /* save non-volatile registers */        
1090         swi     r19, r11, CC_R19                 
1091         swi     r20, r11, CC_R20                 
1092         swi     r21, r11, CC_R21                 
1093         swi     r22, r11, CC_R22                 
1094         swi     r23, r11, CC_R23                 
1095         swi     r24, r11, CC_R24                 
1096         swi     r25, r11, CC_R25                 
1097         swi     r26, r11, CC_R26                 
1098         swi     r27, r11, CC_R27                 
1099         swi     r28, r11, CC_R28                 
1100         swi     r29, r11, CC_R29                 
1101         swi     r30, r11, CC_R30                 
1102         /* special purpose registers */          
1103         mfs     r12, rmsr                        
1104         swi     r12, r11, CC_MSR                 
1105         mfs     r12, rear                        
1106         swi     r12, r11, CC_EAR                 
1107         mfs     r12, resr                        
1108         swi     r12, r11, CC_ESR                 
1109         mfs     r12, rfsr                        
1110         swi     r12, r11, CC_FSR                 
1111                                                  
1112         /* update r31 (CURRENT_TASK) to point to the task that runs next */
1113         lwi     CURRENT_TASK, r6, TI_TASK        
1114         /* stored it to current_save too */      
1115         swi     CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
1116                                                  
1117         /* get the new process' cpu context and restore it */
1118         /* get the start of cpu_context for the next task */
1119         addik   r11, r6, TI_CPU_CONTEXT          
1120                                                  
1121         /* non-volatile registers */             
1122         lwi     r30, r11, CC_R30                 
1123         lwi     r29, r11, CC_R29                 
1124         lwi     r28, r11, CC_R28                 
1125         lwi     r27, r11, CC_R27                 
1126         lwi     r26, r11, CC_R26                 
1127         lwi     r25, r11, CC_R25                 
1128         lwi     r24, r11, CC_R24                 
1129         lwi     r23, r11, CC_R23                 
1130         lwi     r22, r11, CC_R22                 
1131         lwi     r21, r11, CC_R21                 
1132         lwi     r20, r11, CC_R20                 
1133         lwi     r19, r11, CC_R19                 
1134         /* dedicated registers */                
1135         lwi     r18, r11, CC_R18                 
1136         lwi     r17, r11, CC_R17                 
1137         lwi     r16, r11, CC_R16                 
1138         lwi     r15, r11, CC_R15                 
1139         lwi     r14, r11, CC_R14                 
1140         lwi     r13, r11, CC_R13                 
1141         /* skip volatile registers */            
1142         lwi     r2, r11, CC_R2                   
1143         lwi     r1, r11, CC_R1                   
1144                                                  
1145         /* special purpose registers */          
1146         lwi     r12, r11, CC_FSR                 
1147         mts     rfsr, r12                        
1148         lwi     r12, r11, CC_MSR                 
1149         mts     rmsr, r12                        
1150                                                  
1151         rtsd    r15, 8                           
1152         nop                                      
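/*
 * _switch_to receives the previous and next thread_info pointers in r5/r6
 * and returns the previous task in r3. A minimal sketch of the C-side
 * usage, assuming the usual MicroBlaze wrapper (the actual macro lives in
 * arch/microblaze/include/asm/switch_to.h):
 *
 *	extern struct task_struct *_switch_to(struct thread_info *prev,
 *					      struct thread_info *next);
 *
 *	#define switch_to(prev, next, last)				\
 *		do {							\
 *			(last) = _switch_to(task_thread_info(prev),	\
 *					    task_thread_info(next));	\
 *		} while (0)
 */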
1153                                                  
1154 #ifdef CONFIG_MB_MANAGER                         
1155 .global xmb_inject_err                           
1156 .section .text                                   
1157 .align 2                                         
1158 .ent xmb_inject_err                              
1159 .type xmb_inject_err, @function                  
1160 xmb_inject_err:                                  
1161         addik   r1, r1, -PT_SIZE                 
1162         SAVE_REGS                                
1163                                                  
1164         /* Switch to real mode */                
1165         VM_OFF;                                  
1166         set_bip;                                 
1167         mbar    1                                
1168         mbar    2                                
1169         bralid  r15, XMB_INJECT_ERR_OFFSET       
1170         nop;                                     
1171                                                  
1172         /* enable virtual mode */                
1173         set_vms;                                 
1174         /* barrier for instructions and data accesses */
1175         mbar    1                                
1176         mbar    2                                
1177         /*                                       
1178          * Enable Interrupts, Virtual Protected Mode, equal to the
1179          * initial state for all possible entry paths.
1180          */                                      
1181         rtbd    r0, 1f                           
1182         nop;                                     
1183 1:                                               
1184         RESTORE_REGS                             
1185         addik   r1, r1, PT_SIZE                  
1186         rtsd    r15, 8;                          
1187         nop;                                     
1188 .end xmb_inject_err                              
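/*
 * xmb_inject_err saves a full register frame, drops to real mode, calls the
 * injection stub placed at XMB_INJECT_ERR_OFFSET in the vector area (see the
 * .org XMB_INJECT_ERR_OFFSET stub further below), then restores state and
 * returns. A minimal sketch of how a caller might use it, assuming the plain
 * void prototype exported to the TMR manager driver:
 *
 *	void xmb_inject_err(void);
 *
 *	// e.g. from a driver's error-injection test path:
 *	xmb_inject_err();
 */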
1189                                                  
1190 .section .data                                   
1191 .global xmb_manager_dev                          
1192 .global xmb_manager_baseaddr                     
1193 .global xmb_manager_crval                        
1194 .global xmb_manager_callback                     
1195 .global xmb_manager_reset_callback               
1196 .global xmb_manager_stackpointer                 
1197 .align 4                                         
1198 xmb_manager_dev:                                 
1199         .long 0                                  
1200 xmb_manager_baseaddr:                            
1201         .long 0                                  
1202 xmb_manager_crval:                               
1203         .long 0                                  
1204 xmb_manager_callback:                            
1205         .long 0                                  
1206 xmb_manager_reset_callback:                      
1207         .long 0                                  
1208 xmb_manager_stackpointer:                        
1209         .long 0                                  
1210                                                  
1211 /*                                               
1212  * When the break vector gets asserted because of error injection,
1213  * the break signal must be blocked before exiting from the
1214  * break handler. The function below records the manager base address,
1215  * control register value and error count callback arguments,
1216  * which are used by the break handler to block the
1217  * break and call the callback function.
1218  */                                              
1219 .global xmb_manager_register                     
1220 .section .text                                   
1221 .align 2                                         
1222 .ent xmb_manager_register                        
1223 .type xmb_manager_register, @function            
1224 xmb_manager_register:                            
1225         swi     r5, r0, xmb_manager_baseaddr     
1226         swi     r6, r0, xmb_manager_crval        
1227         swi     r7, r0, xmb_manager_callback     
1228         swi     r8, r0, xmb_manager_dev          
1229         swi     r9, r0, xmb_manager_reset_callback
1230                                                  
1231         rtsd    r15, 8;                          
1232         nop;                                     
1233 .end xmb_manager_register                        
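/*
 * The register stores above follow the MicroBlaze calling convention
 * (arguments in r5..r9), so the arguments arrive in this order: base
 * address, control register value, error callback, device/private pointer,
 * reset callback. A minimal C-level sketch of a registration call, with a
 * prototype assumed to match that argument order (names below are
 * illustrative only):
 *
 *	void xmb_manager_register(uintptr_t phys_baseaddr, u32 cr_val,
 *				  void (*callback)(void *data),
 *				  void *priv,
 *				  void (*reset_callback)(void *data));
 *
 *	// hypothetical driver code:
 *	xmb_manager_register(regs_phys, cr_val, my_error_cb, my_dev, my_reset_cb);
 */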
1234 #endif                                           
1235                                                  
1236 ENTRY(_reset)                                    
1237         VM_OFF                                   
1238         brai    0; /* Jump to reset vector */    
1239                                                  
1240         /* These are compiled and loaded into high memory, then
1241          * copied into place in mach_early_setup */
1242         .section        .init.ivt, "ax"          
1243 #if CONFIG_MANUAL_RESET_VECTOR && !defined(CONFIG_MB_MANAGER)
1244         .org    0x0                              
1245         brai    CONFIG_MANUAL_RESET_VECTOR       
1246 #elif defined(CONFIG_MB_MANAGER)                 
1247         .org    0x0                              
1248         brai    TOPHYS(_xtmr_manager_reset);     
1249 #endif                                           
1250         .org    0x8                              
1251         brai    TOPHYS(_user_exception); /* syscall handler */
1252         .org    0x10                             
1253         brai    TOPHYS(_interrupt);     /* Interrupt vector */
1254 #ifdef CONFIG_MB_MANAGER                         
1255         .org    0x18                             
1256         brai    TOPHYS(_xmb_manager_break);      
1257 #else                                            
1258         .org    0x18                             
1259         brai    TOPHYS(_debug_exception);        
1260 #endif                                           
1261         .org    0x20                             
1262         brai    TOPHYS(_hw_exception_handler); /* HW exception handler */
1263                                                  
1264 #ifdef CONFIG_MB_MANAGER                         
1265         /*                                       
1266          * The TMR Inject API, which injects the error, must
1267          * be executed from LMB.
1268          * TMR Inject is programmed with the address 0x200 so that
1269          * when the program counter matches this address the error will
1270          * be injected. 0x200 is expected to be the next available BRAM
1271          * offset, hence it is used for this API.
1272          */                                      
1273         .org    XMB_INJECT_ERR_OFFSET            
1274 xmb_inject_error:                                
1275         nop                                      
1276         rtsd    r15, 8                           
1277         nop                                      
1278 #endif                                           
1279                                                  
1280 .section .rodata,"a"                             
1281 #include "syscall_table.S"                       
1282                                                  
1283 syscall_table_size=(.-sys_call_table)            
1284                                                  
1285 type_SYSCALL:                                    
1286         .ascii "SYSCALL\0"                       
1287 type_IRQ:                                        
1288         .ascii "IRQ\0"                           
1289 type_IRQ_PREEMPT:                                
1290         .ascii "IRQ (PREEMPTED)\0"               
1291 type_SYSCALL_PREEMPT:                            
1292         .ascii " SYSCALL (PREEMPTED)\0"          
1293                                                  
1294         /*                                       
1295          * Trap decoding for stack unwinder      
1296          * Tuples are (start addr, end addr, string)
1297          * If the return address lies in [start addr, end addr], the
1298          * unwinder displays 'string'            
1299          */                                      
1300                                                  
1301         .align 4                                 
1302 .global microblaze_trap_handlers                 
1303 microblaze_trap_handlers:                        
1304         /* Exact matches come first */           
1305         .word ret_from_trap; .word ret_from_trap   ; .word type_SYSCALL
1306         .word ret_from_irq ; .word ret_from_irq    ; .word type_IRQ
1307         /* Fuzzy matches go here */              
1308         .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1309         .word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
1310         /* End of table */                       
1311         .word 0               ; .word 0               ; .word 0
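/*
 * Each table row above is three words: start address, end address and a
 * pointer to one of the type_* strings, with an all-zero row terminating
 * the table. A minimal C-level view of that layout, assuming a struct along
 * the lines of the one the unwinder uses (arch/microblaze/kernel/unwind.c):
 *
 *	struct trap_handler_info {
 *		unsigned long start_addr;
 *		unsigned long end_addr;
 *		const char *trap_name;
 *	};
 *
 *	// The unwinder scans rows until start_addr == 0 and prints trap_name
 *	// when a return address falls inside [start_addr, end_addr].
 */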
                                                      
