Linux/arch/parisc/kernel/entry.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>          /* for L1_CACHE_SHIFT */
#include <asm/assembly.h>       /* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>
#include <asm/spinlock_types.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
        .level 2.0w
#else
        .level 2.0
#endif

/*
 * We need seven instructions after a TLB insert for it to take effect.
 * The PA8800/PA8900 processors are an exception and need 12 instructions.
 * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
 */
#ifdef CONFIG_64BIT
#define NUM_PIPELINE_INSNS    12
#else
#define NUM_PIPELINE_INSNS    7
#endif

        /* Insert num nops */
        .macro  insert_nops num
        .rept \num
        nop
        .endr
        .endm
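
        /* Usage sketch (this mirrors the TLB miss exits further below,
         * it is not extra code): pad the pipeline after a TLB insert so
         * the new entry takes effect before control leaves the handler:
         *
         *      idtlbt          pte,prot
         *      insert_nops     NUM_PIPELINE_INSNS - 1
         *      rfir
         */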

        /* Get aligned page_table_lock address for this mm from cr28/tr4 */
        .macro  get_ptl reg
        mfctl   %cr28,\reg
        .endm

        /* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
        .macro  space_to_prot spc prot
        depd,z  \spc,62,31,\prot
        .endm
#else
        .macro  space_to_prot spc prot
        extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
        .endm
#endif
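
        /* Our reading of the depd,z operands (an aid for readers, not
         * from the original comments): for SPACEID_SHIFT == 0 this is
         * effectively prot = spc << 1, placing the space id in the PID
         * field while leaving the write-disable bit clear. */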
        /*
         * The "get_stack" macros are responsible for determining the
         * kernel stack value.
         *
         *      If sr7 == 0
         *          Already using a kernel stack, so call the
         *          get_stack_use_r30 macro to push a pt_regs structure
         *          on the stack, and store registers there.
         *      else
         *          Need to set up a kernel stack, so call the
         *          get_stack_use_cr30 macro to set up a pointer
         *          to the pt_regs structure contained within the
         *          task pointer pointed to by cr30. Load the stack
         *          pointer from the task structure.
         *
         * Note that we use shadowed registers for temps until
         * we can save %r26 and %r29. %r26 is used to preserve
         * %r8 (a shadowed register) which temporarily contained
         * either the fault type ("code") or the eirr. We need
         * to use a non-shadowed register to carry the value over
         * the rfir in virt_map. We use %r26 since this value winds
         * up being passed as the argument to either do_cpu_irq_mask
         * or handle_interruption. %r29 is used to hold a pointer
         * to the register save area, and once again, it needs to
         * be a non-shadowed register so that it survives the rfir.
         */

        .macro  get_stack_use_cr30

        /* we save the registers in the task struct */

        copy    %r30, %r17
        mfctl   %cr30, %r1
        tophys  %r1,%r9         /* task_struct */
        LDREG   TASK_STACK(%r9),%r30
        ldo     PT_SZ_ALGN(%r30),%r30
        mtsp    %r0,%sr7        /* clear sr7 after kernel stack was set! */
        mtsp    %r16,%sr3
        ldo     TASK_REGS(%r9),%r9
        STREG   %r17,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
        STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm

        .macro  get_stack_use_r30

        /* we put a struct pt_regs on the stack and save the registers there */

        tophys  %r30,%r9
        copy    %r30,%r1
        ldo     PT_SZ_ALGN(%r30),%r30
        STREG   %r1,PT_GR30(%r9)
        STREG   %r29,PT_GR29(%r9)
        STREG   %r26,PT_GR26(%r9)
        STREG   %r16,PT_SR7(%r9)
        copy    %r9,%r29
        .endm
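
        /* These two macros are always selected by testing %sr7; a sketch
         * of the dispatch pattern (cf. intr_extint and intr_save below):
         *
         *      mfsp    %sr7,%r16
         *      cmpib,COND(=),n 0,%r16,1f       (sr7 == 0: already kernel)
         *      get_stack_use_cr30
         *      b,n     2f
         * 1:   get_stack_use_r30
         * 2:   save_specials %r29
         */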

        .macro  rest_stack
        LDREG   PT_GR1(%r29), %r1
        LDREG   PT_GR30(%r29),%r30
        LDREG   PT_GR29(%r29),%r29
        .endm

        /* default interruption handler
         * (calls traps.c:handle_interruption) */
        .macro  def code
        b       intr_save
        ldi     \code, %r8
        .align  32
        .endm

        /* Interrupt interruption handler
         * (calls irq.c:do_cpu_irq_mask) */
        .macro  extint code
        b       intr_extint
        mfsp    %sr7,%r16
        .align  32
        .endm

        .import os_hpmc, code

        /* HPMC handler */
        .macro  hpmc code
        nop                     /* must be a NOP, will be patched later */
        load32  PA(os_hpmc), %r3
        bv,n    0(%r3)
        nop
        .word   0               /* checksum (will be patched) */
        .word   0               /* address of handler */
        .word   0               /* length of handler */
        .endm

        /*
         * Performance Note: Instructions will be moved up into
         * this part of the code later on, once we are sure
         * that the tlb miss handlers are close to final form.
         */

        /* Register definitions for tlb miss handler macros */

        va  = r8        /* virtual address for which the trap occurred */
        spc = r24       /* space for which the trap occurred */

#ifndef CONFIG_64BIT

        /*
         * itlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  itlb_11 code

        mfctl   %pcsq, spc
        b       itlb_miss_11
        mfctl   %pcoq, va

        .align          32
        .endm
#endif

        /*
         * itlb miss interruption handler (parisc 2.0)
         */

        .macro  itlb_20 code
        mfctl   %pcsq, spc
#ifdef CONFIG_64BIT
        b       itlb_miss_20w
#else
        b       itlb_miss_20
#endif
        mfctl   %pcoq, va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * naitlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  naitlb_11 code

        mfctl   %isr,spc
        b       naitlb_miss_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /*
         * naitlb miss interruption handler (parisc 2.0)
         */

        .macro  naitlb_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       naitlb_miss_20w
#else
        b       naitlb_miss_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * dtlb miss interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  dtlb_11 code

        mfctl   %isr, spc
        b       dtlb_miss_11
        mfctl   %ior, va

        .align          32
        .endm
#endif

        /*
         * dtlb miss interruption handler (parisc 2.0)
         */

        .macro  dtlb_20 code

        mfctl   %isr, spc
#ifdef CONFIG_64BIT
        b       dtlb_miss_20w
#else
        b       dtlb_miss_20
#endif
        mfctl   %ior, va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

        .macro  nadtlb_11 code

        mfctl   %isr,spc
        b       nadtlb_miss_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /* nadtlb miss interruption handler (parisc 2.0) */

        .macro  nadtlb_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       nadtlb_miss_20w
#else
        b       nadtlb_miss_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

#ifndef CONFIG_64BIT
        /*
         * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
         */

        .macro  dbit_11 code

        mfctl   %isr,spc
        b       dbit_trap_11
        mfctl   %ior,va

        .align          32
        .endm
#endif

        /*
         * dirty bit trap interruption handler (parisc 2.0)
         */

        .macro  dbit_20 code

        mfctl   %isr,spc
#ifdef CONFIG_64BIT
        b       dbit_trap_20w
#else
        b       dbit_trap_20
#endif
        mfctl   %ior,va

        .align          32
        .endm

        /* In LP64, the space register contains part of the upper 32 bits
         * of the fault address.  We have to extract this and place it in
         * the va, zeroing the corresponding bits in the space register */
        .macro          space_adjust    spc,va,tmp
#ifdef CONFIG_64BIT
        extrd,u         \spc,63,SPACEID_SHIFT,\tmp
        depd            %r0,63,SPACEID_SHIFT,\spc
        depd            \tmp,31,SPACEID_SHIFT,\va
#endif
        .endm
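
        /* In other words (LP64 only): the low SPACEID_SHIFT bits of the
         * space id are really bits 32 and up of the fault address; move
         * them from \spc into \va and clear them in \spc. */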

        .import         swapper_pg_dir,code

        /* Get the pgd.  For faults on space zero (kernel space), this
         * is simply swapper_pg_dir.  For user space faults, the
         * pgd is stored in %cr25 */
        .macro          get_pgd         spc,reg
        ldil            L%PA(swapper_pg_dir),\reg
        ldo             R%PA(swapper_pg_dir)(\reg),\reg
        or,COND(=)      %r0,\spc,%r0
        mfctl           %cr25,\reg
        .endm

        /*
                space_check(spc,tmp,fault)

                spc - The space we saw the fault with.
                tmp - The place to store the current space.
                fault - Function to call on failure.

                Only allow faults on different spaces from the
                currently active one if we're the kernel

        */
        .macro          space_check     spc,tmp,fault
        mfsp            %sr7,\tmp
        /* check against %r0, which is the same value as LINUX_GATEWAY_SPACE */
        or,COND(<>)     %r0,\spc,%r0    /* user may execute gateway page
                                         * as kernel, so defeat the space
                                         * check if it is */
        copy            \spc,\tmp
        or,COND(=)      %r0,\tmp,%r0    /* nullify if executing as kernel */
        cmpb,COND(<>),n \tmp,\spc,\fault
        .endm
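
        /* Step by step: \tmp starts out as the current space (%sr7).
         * For a user fault (\spc != 0) the copy is nullified, so \tmp
         * keeps %sr7; for a space-zero fault \tmp becomes zero, which
         * nullifies the cmpb so kernel faults always pass the check.
         * Otherwise we branch to \fault whenever \tmp != \spc. */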

        /* Look up a PTE in a 2-Level scheme (faulting at each
         * level if the entry isn't present)
         *
         * NOTE: we use ldw even for LP64, since the short pointers
         * can address up to 1TB
         */
        .macro          L2_ptep pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
        extru_safe      \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
        extru_safe      \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
        copy            %r0,\pte
#endif
        ldw,s           \index(\pmd),\pmd
        bb,>=,n         \pmd,_PxD_PRESENT_BIT,\fault
        dep             %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
        SHLREG          \pmd,PxD_VALUE_SHIFT,\pmd
        extru_safe      \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
        dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
        shladd          \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
        .endm

        /* Look up PTE in a 3-Level scheme. */
        .macro          L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
        copy            %r0,\pte
        extrd,u         \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
        ldw,s           \index(\pgd),\pgd
        bb,>=,n         \pgd,_PxD_PRESENT_BIT,\fault
        shld            \pgd,PxD_VALUE_SHIFT,\pgd
#endif
        L2_ptep         \pgd,\pte,\index,\va,\fault
        .endm
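
        /* Walk summary for the two macros above: the pgd index comes
         * from the va bits above ASM_PGDIR_SHIFT, the pmd index (3-level
         * only) from the bits above ASM_PMD_SHIFT, and the pte index
         * from the bits above PAGE_SHIFT; the walk faults whenever a
         * pgd/pmd entry lacks _PxD_PRESENT_BIT. */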

        /* Acquire page_table_lock and check page is present. */
        .macro          ptl_lock        spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:     cmpib,COND(=),n 0,\spc,2f
        get_ptl         \tmp
1:      LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
        LDREG           0(\ptp),\pte
        bb,<,n          \pte,_PAGE_PRESENT_BIT,3f
        b               \fault
        stw             \tmp1,0(\tmp)
99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:      LDREG           0(\ptp),\pte
        bb,>=,n         \pte,_PAGE_PRESENT_BIT,\fault
3:
        .endm
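
        /* The spin loop above relies on ldcw semantics: ldcw atomically
         * loads the lock word and writes zero, so reading zero means the
         * lock is held elsewhere and we retry at 1b.  If the pte turns
         * out not to be present, the stw in the delay slot of "b \fault"
         * writes the nonzero value back, releasing the lock. */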

        /* Release the page_table_lock if the fault was in user space. We
           use an ordered store to ensure all prior accesses are performed
           prior to releasing the lock. Note the stw may not be executed,
           so we provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
        .macro          ptl_unlock      spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98:     get_ptl         \tmp
        ldi             __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
        or,COND(=)      %r0,\spc,%r0
        stw,ma          \tmp2,0(\tmp)
99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
        insert_nops     NUM_PIPELINE_INSNS - 4
#else
        insert_nops     NUM_PIPELINE_INSNS - 1
#endif
        .endm

        /* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
         * don't needlessly dirty the cache line if it was already set */
        .macro          update_accessed ptp,pte,tmp,tmp1
        ldi             _PAGE_ACCESSED,\tmp1
        or              \tmp1,\pte,\tmp
        and,COND(<>)    \tmp1,\pte,%r0
        STREG           \tmp,0(\ptp)
        .endm
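
        /* The and,COND(<>) nullifies the STREG when _PAGE_ACCESSED is
         * already set, so the pte is only written (and the cache line
         * only dirtied) on the first access. */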

        /* Set the dirty bit (and accessed bit).  No need to be
         * clever, this is only used from the dirty fault */
        .macro          update_dirty    ptp,pte,tmp
        ldi             _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
        or              \tmp,\pte,\pte
        STREG           \pte,0(\ptp)
        .endm

        /* We have (depending on the page size):
         * - 38 to 52-bit Physical Page Number
         * - 12 to 26-bit page offset
         */
        /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
         * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
        #define PAGE_ADD_SHIFT          (PAGE_SHIFT-12)
        #define PAGE_ADD_HUGE_SHIFT     (REAL_HPAGE_SHIFT-12)
        #define PFN_START_BIT   (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        .macro          convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
        copy            \pte,\tmp
        extrd,u         \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte

        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_SHIFT,\pte
        extrd,u,*=      \tmp,_PAGE_HPAGE_BIT+32,1,%r0
        depdi           _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
        extrd,u         \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
        depdi           _PAGE_SIZE_ENCODING_DEFAULT,63,\
                                (63-58)+PAGE_ADD_SHIFT,\pte
#endif
        .endm

        /* Convert the pte and prot to tlb insertion values.  How
         * this happens is quite subtle, read below */
        .macro          make_insert_tlb spc,pte,prot,tmp
        space_to_prot   \spc \prot        /* create prot id from space */
        /* The following is the real subtlety.  This is depositing
         * T <-> _PAGE_REFTRAP
         * D <-> _PAGE_DIRTY
         * B <-> _PAGE_DMB (memory break)
         *
         * Then incredible subtlety: The access rights are
         * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
         * See 3-14 of the parisc 2.0 manual
         *
         * Finally, _PAGE_READ goes in the top bit of PL1 (so we
         * trigger an access rights trap in user space if the user
         * tries to read an unreadable page) */
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
        /* need to drop DMB bit, as it's used as SPECIAL flag */
        depi            0,_PAGE_SPECIAL_BIT,1,\pte
#endif
        depd            \pte,8,7,\prot

        /* PAGE_USER indicates the page can be read with user privileges,
         * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
         * contains _PAGE_READ) */
        extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
        depdi           7,11,3,\prot
        /* If we're a gateway page, drop PL2 back to zero for promotion
         * to kernel privilege (so we can execute the page as kernel).
         * Any privilege promotion page always denies read and write */
        extrd,u,*=      \pte,_PAGE_GATEWAY_BIT+32,1,%r0
        depd            %r0,11,2,\prot  /* If Gateway, Set PL2 to 0 */

        /* Enforce uncacheable pages.
         * This should ONLY be used for MMIO on PA 2.0 machines.
         * Memory/DMA is cache coherent on all PA2.0 machines we support
         * (that means T-class is NOT supported) and the memory controllers
         * on most of those machines only handle cache transactions.
         */
        extrd,u,*=      \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
        depdi           1,12,1,\prot

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        convert_for_tlb_insert20 \pte \tmp
        .endm

        /* Identical macro to make_insert_tlb above, except it
         * makes the tlb entry for the differently formatted pa11
         * insertion instructions */
        .macro          make_insert_tlb_11      spc,pte,prot
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
        /* need to drop DMB bit, as it's used as SPECIAL flag */
        depi            0,_PAGE_SPECIAL_BIT,1,\pte
#endif
        zdep            \spc,30,15,\prot
        dep             \pte,8,7,\prot
        extru,=         \pte,_PAGE_NO_CACHE_BIT,1,%r0
        depi            1,12,1,\prot
        extru,=         \pte,_PAGE_USER_BIT,1,%r0
        depi            7,11,3,\prot   /* Set for user space (1 rsvd for read) */
        extru,=         \pte,_PAGE_GATEWAY_BIT,1,%r0
        depi            0,11,2,\prot    /* If Gateway, Set PL2 to 0 */

        /* Get rid of prot bits and convert to page addr for iitlba */

        depi            0,31,ASM_PFN_PTE_SHIFT,\pte
        SHRREG          \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
        .endm

        /* This is for ILP32 PA2.0 only.  The TLB insertion needs
         * to extend into I/O space if the address is 0xfXXXXXXX
         * so we extend the f's into the top word of the pte in
         * this case */
        .macro          f_extend        pte,tmp
        extrd,s         \pte,42,4,\tmp
        addi,<>         1,\tmp,%r0
        extrd,s         \pte,63,25,\pte
        .endm
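
        /* Illustrative example (values are ours, not from this file):
         * a 32-bit I/O address such as 0xf4000000 has its top nibble
         * all ones, so the sign-extension yields the matching address
         * 0xfffffffff4000000 in 64-bit I/O space. */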

        /* The alias region consists of a pair of 4 MB regions
         * aligned to 8 MB. It is used to clear/copy/flush user pages
         * using kernel virtual addresses congruent with the user
         * virtual address.
         *
         * To use the alias page, you set %r26 up with the "to" TLB
         * entry (identifying the physical page) and %r23 up with
         * the "from" TLB entry (or nothing if only a "to" entry---for
         * clear_user_page_asm) */
        .macro          do_alias        spc,tmp,tmp1,va,pte,prot,fault,patype
        cmpib,COND(<>),n 0,\spc,\fault
        ldil            L%(TMPALIAS_MAP_START),\tmp
        copy            \va,\tmp1
        depi_safe       0,31,TMPALIAS_SIZE_BITS+1,\tmp1
        cmpb,COND(<>),n \tmp,\tmp1,\fault
        mfctl           %cr19,\tmp      /* iir */
        /* get the opcode (first six bits) into \tmp */
        extrw,u         \tmp,5,6,\tmp
        /*
         * Only setting the T bit prevents data cache movein
         * Setting access rights to zero prevents instruction cache movein
         *
         * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
         * to type field and _PAGE_READ goes to top bit of PL1
         */
        ldi             (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
        /*
         * so if the opcode is one (i.e. this is a memory management
         * instruction) nullify the next load so \prot is only T.
         * Otherwise this is a normal data operation
         */
        cmpiclr,=       0x01,\tmp,%r0
        ldi             (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
        depd,z          \prot,8,7,\prot
.else
.ifc \patype,11
        depw,z          \prot,8,7,\prot
.else
        .error "undefined PA type to do_alias"
.endif
.endif
        /*
         * OK, it is in the temp alias region, check whether "from" or "to".
         * Check "subtle" note in pacache.S re: r23/r26.
         */
        extrw,u,=       \va,31-TMPALIAS_SIZE_BITS,1,%r0
        or,COND(tr)     %r23,%r0,\pte
        or              %r26,%r0,\pte

        /* convert phys addr in \pte (from r23 or r26) to tlb insert format */
        SHRREG          \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
        depi_safe       _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
        .endm


        /*
         * Fault_vectors are architecturally required to be aligned on a 2K
         * boundary
         */

        .section .text.hot
        .align 2048

ENTRY(fault_vector_20)
        /* First vector is invalid (0) */
        .ascii  "cows can fly"
        .byte 0
        .align 32

        hpmc             1
        def              2
        def              3
        extint           4
        def              5
        itlb_20          PARISC_ITLB_TRAP
        def              7
        def              8
        def              9
        def             10
        def             11
        def             12
        def             13
        def             14
        dtlb_20         15
        naitlb_20       16
        nadtlb_20       17
        def             18
        def             19
        dbit_20         20
        def             21
        def             22
        def             23
        def             24
        def             25
        def             26
        def             27
        def             28
        def             29
        def             30
        def             31
END(fault_vector_20)

#ifndef CONFIG_64BIT

        .align 2048

ENTRY(fault_vector_11)
        /* First vector is invalid (0) */
        .ascii  "cows can fly"
        .byte 0
        .align 32

        hpmc             1
        def              2
        def              3
        extint           4
        def              5
        itlb_11          PARISC_ITLB_TRAP
        def              7
        def              8
        def              9
        def             10
        def             11
        def             12
        def             13
        def             14
        dtlb_11         15
        naitlb_11       16
        nadtlb_11       17
        def             18
        def             19
        dbit_11         20
        def             21
        def             22
        def             23
        def             24
        def             25
        def             26
        def             27
        def             28
        def             29
        def             30
        def             31
END(fault_vector_11)

#endif
        /* Fault vector is separately protected and *must* be on its own page */
        .align          PAGE_SIZE

        .import         handle_interruption,code
        .import         do_cpu_irq_mask,code

        /*
         * Child Returns here
         *
         * copy_thread moved args into task save area.
         */

ENTRY(ret_from_kernel_thread)
        /* Call schedule_tail first though */
        BL      schedule_tail, %r2
        nop

        mfctl   %cr30,%r1       /* task_struct */
        LDREG   TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
        LDREG   TASK_PT_GR27(%r1), %r27
#endif
        LDREG   TASK_PT_GR26(%r1), %r1
        ble     0(%sr7, %r1)
        copy    %r31, %r2
        b       finish_child_return
        nop
END(ret_from_kernel_thread)


        /*
         * struct task_struct *_switch_to(struct task_struct *prev,
         *      struct task_struct *next)
         *
         * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
        STREG    %r2, -RP_OFFSET(%r30)

        callee_save_float
        callee_save

        load32  _switch_to_ret, %r2

        STREG   %r2, TASK_PT_KPC(%r26)
        LDREG   TASK_PT_KPC(%r25), %r2

        STREG   %r30, TASK_PT_KSP(%r26)
        LDREG   TASK_PT_KSP(%r25), %r30
        bv      %r0(%r2)
        mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
        mtctl   %r0, %cr0               /* Needed for single stepping */
        callee_rest
        callee_rest_float

        LDREG   -RP_OFFSET(%r30), %r2
        bv      %r0(%r2)
        copy    %r26, %r28
ENDPROC_CFI(_switch_to)

        /*
         * Common rfi return path for interruptions, kernel execve, and
         * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
         * return via this path if the signal was received when the process
         * was running; if the process was blocked on a syscall then the
         * normal syscall_exit path is used.  All syscalls for traced
         * processes exit via intr_restore.
         *
         * XXX If any syscalls that change a process's space id ever exit
         * this way, then we will need to copy %sr3 into PT_SR[3..7], and
         * adjust IASQ[0..1].
         *
         */

        .align  PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
        mfctl   %cr30,%r16              /* task_struct */
        ldo     TASK_REGS(%r16),%r16
        /* Force iaoq to userspace, as the user has had access to our current
         * context via sigcontext. Also filter the PSW for the same reason.
         */
        LDREG   PT_IAOQ0(%r16),%r19
        depi    PRIV_USER,31,2,%r19
        STREG   %r19,PT_IAOQ0(%r16)
        LDREG   PT_IAOQ1(%r16),%r19
        depi    PRIV_USER,31,2,%r19
        STREG   %r19,PT_IAOQ1(%r16)
        LDREG   PT_PSW(%r16),%r19
        load32  USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
        load32  USER_PSW_HI_MASK,%r20
        depd    %r20,31,32,%r1
#endif
        and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
        load32  USER_PSW,%r1
        or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
        STREG   %r19,PT_PSW(%r16)

        /*
         * If we aren't being traced, we never saved space registers
         * (we don't store them in the sigcontext), so set them
         * to "proper" values now (otherwise we'll wind up restoring
         * whatever was last stored in the task structure, which might
         * be inconsistent if an interrupt occurred while on the gateway
         * page). Note that we may be "trashing" values the user put in
         * them, but we don't support the user changing them.
         */

        STREG   %r0,PT_SR2(%r16)
        mfsp    %sr3,%r19
        STREG   %r19,PT_SR0(%r16)
        STREG   %r19,PT_SR1(%r16)
        STREG   %r19,PT_SR3(%r16)
        STREG   %r19,PT_SR4(%r16)
        STREG   %r19,PT_SR5(%r16)
        STREG   %r19,PT_SR6(%r16)
        STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
        bb,<,n  %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

        .import do_notify_resume,code
intr_check_sig:
        /* As above */
        mfctl   %cr30,%r1
        LDREG   TASK_TI_FLAGS(%r1),%r19
        ldi     (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
        and,COND(<>)    %r19, %r20, %r0
        b,n     intr_restore    /* skip past if we've nothing to do */

        /* This check is critical to having LWS
         * working. The IASQ is zero on the gateway
         * page and we cannot deliver any signals until
         * we get off the gateway page.
         *
         * Only do signals if we are returning to user space
         */
        LDREG   PT_IASQ0(%r16), %r20
        cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

        copy    %r0, %r25                       /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
#endif

        /* NOTE: We need to enable interrupts if we have to deliver
         * signals. We used to do this earlier but it caused kernel
         * stack overflows. */
        ssm     PSW_SM_I, %r0

        BL      do_notify_resume,%r2
        copy    %r16, %r26                      /* struct pt_regs *regs */

        b,n     intr_check_sig

intr_restore:
        copy            %r16,%r29
        ldo             PT_FR31(%r29),%r1
        rest_fp         %r1
        rest_general    %r29

        /* inverse of virt_map */
        pcxt_ssm_bug
        rsm             PSW_SM_QUIET,%r0        /* prepare for rfi */
        tophys_r1       %r29

        /* Restore space id's and special cr's from PT_REGS
         * structure pointed to by r29
         */
        rest_specials   %r29

        /* IMPORTANT: rest_stack restores r29 last (we are using it)!
         * It also restores r1 and r30.
         */
        rest_stack

        rfi
        nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt        intr_restore
#endif /* !CONFIG_PREEMPTION */

        .import schedule,code
intr_do_resched:
        /* Only call schedule on return to userspace. If we're returning
         * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
         * we jump back to intr_restore.
         */
        LDREG   PT_IASQ0(%r16), %r20
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop

        /* NOTE: We need to enable interrupts if we schedule.  We used
         * to do this earlier but it caused kernel stack overflows. */
        ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
#endif

        ldil    L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
        b       schedule
#else
        load32  schedule, %r20
        bv      %r0(%r20)
#endif
        ldo     R%intr_check_sig(%r2), %r2

        /* preempt the current task on returning to kernel
         * mode from an interrupt, iff need_resched is set,
         * and preempt_count is 0. otherwise, we continue on
         * our merry way back to the current running task.
         */
#ifdef CONFIG_PREEMPTION
        .import preempt_schedule_irq,code
intr_do_preempt:
        rsm     PSW_SM_I, %r0           /* disable interrupts */

        /* current_thread_info()->preempt_count */
        mfctl   %cr30, %r1
        ldw     TI_PRE_COUNT(%r1), %r19
        cmpib,<>        0, %r19, intr_restore   /* if preempt_count > 0 */
        nop                             /* prev insn branched backwards */

        /* check if we interrupted a critical path */
        LDREG   PT_PSW(%r16), %r20
        bb,<,n  %r20, 31 - PSW_SM_I, intr_restore
        nop

        /* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
        ldil    L%intr_restore, %r2
        load32  preempt_schedule_irq, %r1
        bv      %r0(%r1)
        ldo     R%intr_restore(%r2), %r2
#else
        ldil    L%intr_restore, %r1
        BL      preempt_schedule_irq, %r2
        ldo     R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

        /*
         * External interrupts.
         */

intr_extint:
        cmpib,COND(=),n 0,%r16,1f

        get_stack_use_cr30
        b,n 2f

1:
        get_stack_use_r30
2:
        save_specials   %r29
        virt_map
        save_general    %r29

        ldo     PT_FR0(%r29), %r24
        save_fp %r24

        loadgp

        copy    %r29, %r26      /* arg0 is pt_regs */
        copy    %r29, %r16      /* save pt_regs */

        ldil    L%intr_return, %r2

#ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29  /* Reference param save area */
#endif

        b       do_cpu_irq_mask
        ldo     R%intr_return(%r2), %r2 /* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


        /* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)            /* for os_hpmc */
        mfsp    %sr7,%r16
        cmpib,COND(=),n 0,%r16,1f
        get_stack_use_cr30
        b       2f
        copy    %r8,%r26

1:
        get_stack_use_r30
        copy    %r8,%r26

2:
        save_specials   %r29

        /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
        cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


        mfctl           %isr, %r16
        nop             /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
        mfctl           %ior, %r17


#ifdef CONFIG_64BIT
        /*
         * If the interrupted code was running with W bit off (32 bit),
         * clear the b bits (bits 0 & 1) in the ior.
         * save_specials left ipsw value in r8 for us to test.
         */
        extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
        depdi           0,1,2,%r17

        /* adjust isr/ior: get high bits from isr and deposit in ior */
        space_adjust    %r16,%r17,%r1
#endif
        STREG           %r16, PT_ISR(%r29)
        STREG           %r17, PT_IOR(%r29)

#if defined(CONFIG_64BIT)
        b,n             intr_save2

skip_save_ior:
        /* We have an itlb miss, and when executing code above 4 GB on ILP64, we
         * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
         * above.
         */
        bb,COND(>=),n   %r8,PSW_W_BIT,intr_save2
        LDREG           PT_IASQ0(%r29), %r16
        LDREG           PT_IAOQ0(%r29), %r17
        /* adjust iasq/iaoq */
        space_adjust    %r16,%r17,%r1
        STREG           %r16, PT_IASQ0(%r29)
        STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
        virt_map
        save_general    %r29

        ldo             PT_FR0(%r29), %r25
        save_fp         %r25

        loadgp

        copy            %r29, %r25      /* arg1 is pt_regs */
#ifdef CONFIG_64BIT
        ldo             -16(%r30),%r29  /* Reference param save area */
#endif

        ldil            L%intr_check_sig, %r2
        copy            %r25, %r16      /* save pt_regs */

        b               handle_interruption
        ldo             R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


        /*
         * Note for all tlb miss handlers:
         *
         * cr24 contains a pointer to the kernel address space
         * page directory.
         *
         * cr25 contains a pointer to the current user address
         * space page directory.
         *
         * sr3 will contain the space id of the user address space
         * of the current running thread while that thread is
         * running in the kernel.
         */

        /*
         * register number allocations.  Note that these are all
         * in the shadowed registers
         */

        t0 = r1         /* temporary register 0 */
        va = r8         /* virtual address for which the trap occurred */
        t1 = r9         /* temporary register 1 */
        pte  = r16      /* pte/phys page # */
        prot = r17      /* prot bits */
        spc  = r24      /* space for which the trap occurred */
        ptp = r25       /* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dtlb_fault

        L3_ptep         ptp,pte,t0,va,dtlb_check_alias_20w

        ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        idtlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop

dtlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20

        idtlbt          pte,prot

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop

nadtlb_miss_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,nadtlb_fault

        L3_ptep         ptp,pte,t0,va,nadtlb_check_alias_20w

        ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        idtlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop

nadtlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20

        idtlbt          pte,prot

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop

#else

dtlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,dtlb_fault

        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_11

        ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        ptl_unlock      spc,t0,t1
        rfir
        nop

dtlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,11

        idtlba          pte,(va)
        idtlbp          prot,(va)

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop

nadtlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,nadtlb_fault

        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_11

        ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        ptl_unlock      spc,t0,t1
        rfir
        nop

nadtlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,11

        idtlba          pte,(va)
        idtlbp          prot,(va)

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop

dtlb_miss_20:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dtlb_fault

        L2_ptep         ptp,pte,t0,va,dtlb_check_alias_20

        ptl_lock        spc,ptp,pte,t0,t1,dtlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        idtlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop

dtlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,dtlb_fault,20

        idtlbt          pte,prot

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop

nadtlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,nadtlb_fault

        L2_ptep         ptp,pte,t0,va,nadtlb_check_alias_20

        ptl_lock        spc,ptp,pte,t0,t1,nadtlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        idtlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop

nadtlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,nadtlb_emulate,20

        idtlbt          pte,prot

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop

#endif

nadtlb_emulate:

        /*
         * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
         * probei instructions. The kernel no longer faults doing flushes.
         * Use of lpa and probe instructions is rare. Given the issue
         * with shadow registers, we defer everything to the "slow" path.
         */
        b,n             nadtlb_fault

#ifdef CONFIG_64BIT
itlb_miss_20w:

        /*
         * I miss is a little different, since we allow users to fault
         * on the gateway page which is in the kernel address space.
         */

        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,itlb_fault

        L3_ptep         ptp,pte,t0,va,itlb_fault

        ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        iitlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop

naitlb_miss_20w:

        /*
         * I miss is a little different, since we allow users to fault
         * on the gateway page which is in the kernel address space.
         */

        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,naitlb_fault

        L3_ptep         ptp,pte,t0,va,naitlb_check_alias_20w

        ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20w
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        iitlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop

naitlb_check_alias_20w:
        do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20

        iitlbt          pte,prot

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop

#else

itlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,itlb_fault

        L2_ptep         ptp,pte,t0,va,itlb_fault

        ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        ptl_unlock      spc,t0,t1
        rfir
        nop

naitlb_miss_11:
        get_pgd         spc,ptp

        space_check     spc,t0,naitlb_fault

        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_11

        ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_11
        update_accessed ptp,pte,t0,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        iitlba          pte,(%sr1,va)
        iitlbp          prot,(%sr1,va)

        mtsp            t1, %sr1        /* Restore sr1 */

        ptl_unlock      spc,t0,t1
        rfir
        nop

naitlb_check_alias_11:
        do_alias        spc,t0,t1,va,pte,prot,itlb_fault,11

        iitlba          pte,(%sr0, va)
        iitlbp          prot,(%sr0, va)

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop


itlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,itlb_fault

        L2_ptep         ptp,pte,t0,va,itlb_fault

        ptl_lock        spc,ptp,pte,t0,t1,itlb_fault
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        iitlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop

naitlb_miss_20:
        get_pgd         spc,ptp

        space_check     spc,t0,naitlb_fault

        L2_ptep         ptp,pte,t0,va,naitlb_check_alias_20

        ptl_lock        spc,ptp,pte,t0,t1,naitlb_check_alias_20
        update_accessed ptp,pte,t0,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        iitlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop

naitlb_check_alias_20:
        do_alias        spc,t0,t1,va,pte,prot,naitlb_fault,20

        iitlbt          pte,prot

        insert_nops     NUM_PIPELINE_INSNS - 1
        rfir
        nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
        space_adjust    spc,va,t0
        get_pgd         spc,ptp
        space_check     spc,t0,dbit_fault

        L3_ptep         ptp,pte,t0,va,dbit_fault

        ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1

        make_insert_tlb spc,pte,prot,t1

        idtlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop
#else

dbit_trap_11:

        get_pgd         spc,ptp

        space_check     spc,t0,dbit_fault

        L2_ptep         ptp,pte,t0,va,dbit_fault

        ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1

        make_insert_tlb_11      spc,pte,prot

        mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
        mtsp            spc,%sr1

        idtlba          pte,(%sr1,va)
        idtlbp          prot,(%sr1,va)

        mtsp            t1, %sr1     /* Restore sr1 */

        ptl_unlock      spc,t0,t1
        rfir
        nop

dbit_trap_20:
        get_pgd         spc,ptp

        space_check     spc,t0,dbit_fault

        L2_ptep         ptp,pte,t0,va,dbit_fault

        ptl_lock        spc,ptp,pte,t0,t1,dbit_fault
        update_dirty    ptp,pte,t1

        make_insert_tlb spc,pte,prot,t1

        f_extend        pte,t1

        idtlbt          pte,prot

        ptl_unlock      spc,t0,t1
        rfir
        nop
#endif

        .import handle_interruption,code

kernel_bad_space:
        b               intr_save
        ldi             31,%r8  /* Use an unused code */

dbit_fault:
        b               intr_save
        ldi             20,%r8

itlb_fault:
        b               intr_save
        ldi             PARISC_ITLB_TRAP,%r8

nadtlb_fault:
        b               intr_save
        ldi             17,%r8

naitlb_fault:
        b               intr_save
        ldi             16,%r8

dtlb_fault:
        b               intr_save
        ldi             15,%r8

        /* Register saving semantics for system calls:

           %r1             clobbered by system call macro in userspace
           %r2             saved in PT_REGS by gateway page
           %r3  - %r18     preserved by C code (saved by signal code)
           %r19 - %r20     saved in PT_REGS by gateway page
           %r21 - %r22     non-standard syscall args
                           stored in kernel stack by gateway page
           %r23 - %r26     arg3-arg0, saved in PT_REGS by gateway page
           %r27 - %r30     saved in PT_REGS by gateway page
           %r31            syscall return pointer
         */

        /* Floating point registers (FIXME: what do we do with these?)

           %fr0  - %fr3    status/exception, not preserved
           %fr4  - %fr7    arguments
           %fr8  - %fr11   not preserved by C code
           %fr12 - %fr21   preserved by C code
           %fr22 - %fr31   not preserved by C code
         */

        .macro  reg_save regs
        STREG   %r3, PT_GR3(\regs)
        STREG   %r4, PT_GR4(\regs)
        STREG   %r5, PT_GR5(\regs)
        STREG   %r6, PT_GR6(\regs)
        STREG   %r7, PT_GR7(\regs)
        STREG   %r8, PT_GR8(\regs)
        STREG   %r9, PT_GR9(\regs)
        STREG   %r10,PT_GR10(\regs)
        STREG   %r11,PT_GR11(\regs)
        STREG   %r12,PT_GR12(\regs)
        STREG   %r13,PT_GR13(\regs)
        STREG   %r14,PT_GR14(\regs)
        STREG   %r15,PT_GR15(\regs)
        STREG   %r16,PT_GR16(\regs)
        STREG   %r17,PT_GR17(\regs)
        STREG   %r18,PT_GR18(\regs)
        .endm

        .macro  reg_restore regs
        LDREG   PT_GR3(\regs), %r3
        LDREG   PT_GR4(\regs), %r4
        LDREG   PT_GR5(\regs), %r5
        LDREG   PT_GR6(\regs), %r6
        LDREG   PT_GR7(\regs), %r7
        LDREG   PT_GR8(\regs), %r8
        LDREG   PT_GR9(\regs), %r9
        LDREG   PT_GR10(\regs),%r10
        LDREG   PT_GR11(\regs),%r11
        LDREG   PT_GR12(\regs),%r12
        LDREG   PT_GR13(\regs),%r13
        LDREG   PT_GR14(\regs),%r14
        LDREG   PT_GR15(\regs),%r15
        LDREG   PT_GR16(\regs),%r16
        LDREG   PT_GR17(\regs),%r17
        LDREG   PT_GR18(\regs),%r18
        .endm

        .macro  fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
        mfctl   %cr30,%r1
        ldo     TASK_REGS(%r1),%r1
        reg_save %r1
        mfctl   %cr27, %r28
        ldil    L%sys_\name, %r31
        be      R%sys_\name(%sr4,%r31)
        STREG   %r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
        .endm
1652 
1653 fork_like clone
1654 fork_like clone3
1655 fork_like fork
1656 fork_like vfork
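
        /* As a worked example, "fork_like clone" above expands to:
         *
         *      ENTRY_CFI(sys_clone_wrapper)
         *              mfctl   %cr30,%r1
         *              ldo     TASK_REGS(%r1),%r1
         *              reg_save %r1
         *              mfctl   %cr27, %r28
         *              ldil    L%sys_clone, %r31
         *              be      R%sys_clone(%sr4,%r31)
         *              STREG   %r28, PT_CR27(%r1)
         *      ENDPROC_CFI(sys_clone_wrapper)
         */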
1657 
1658         /* Set the return value for the child */
1659 ENTRY(child_return)
1660         BL      schedule_tail, %r2
1661         nop
1662 finish_child_return:
1663         mfctl   %cr30,%r1
1664         ldo     TASK_REGS(%r1),%r1       /* get pt regs */
1665 
1666         LDREG   PT_CR27(%r1), %r3
1667         mtctl   %r3, %cr27
1668         reg_restore %r1
1669         b       syscall_exit
1670         copy    %r0,%r28
1671 END(child_return)
1672 
1673 ENTRY_CFI(sys_rt_sigreturn_wrapper)
1674         mfctl   %cr30,%r26
1675         ldo     TASK_REGS(%r26),%r26    /* get pt regs */
1676         /* Don't save regs, we are going to restore them from sigcontext. */
1677         STREG   %r2, -RP_OFFSET(%r30)
1678 #ifdef CONFIG_64BIT
1679         ldo     FRAME_SIZE(%r30), %r30
1680         BL      sys_rt_sigreturn,%r2
1681         ldo     -16(%r30),%r29          /* Reference param save area */
1682 #else
1683         BL      sys_rt_sigreturn,%r2
1684         ldo     FRAME_SIZE(%r30), %r30
1685 #endif
1686 
1687         ldo     -FRAME_SIZE(%r30), %r30
1688         LDREG   -RP_OFFSET(%r30), %r2
1689 
1690         /* FIXME: I think we need to restore a few more things here. */
1691         mfctl   %cr30,%r1
1692         ldo     TASK_REGS(%r1),%r1      /* get pt regs */
1693         reg_restore %r1
1694 
1695         /* If the signal was received while the process was blocked on a
1696          * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1697          * take us to syscall_exit_rfi and on to intr_return.
1698          */
1699         bv      %r0(%r2)
1700         LDREG   PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
1701 ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1702 
1703 ENTRY(syscall_exit)
1704         /* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
1705          * via syscall_exit_rfi if the signal was received while the process
1706          * was running.
1707          */
1708 
1709         /* save return value now */
1710         mfctl     %cr30, %r1
1711         STREG     %r28,TASK_PT_GR28(%r1)
1712 
1713         /* It seems dp could be wrong here if the syscall involved calling
1714          * into a module and nothing got around to restoring dp on return.
1715          */
1716         loadgp
1717 
1718 syscall_check_resched:
1719 
1720         /* check for reschedule */
1721         mfctl   %cr30,%r19
1722         LDREG   TASK_TI_FLAGS(%r19),%r19        /* long */
1723         bb,<,n  %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1724 
1725         .import do_signal,code
1726 syscall_check_sig:
1727         mfctl   %cr30,%r19
1728         LDREG   TASK_TI_FLAGS(%r19),%r19
1729         ldi     (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
1730         and,COND(<>)    %r19, %r26, %r0
1731         b,n     syscall_restore /* skip past if we've nothing to do */
1732 
1733 syscall_do_signal:
1734         /* Save callee-save registers (for sigcontext).
1735          * FIXME: After this point the process structure should be
1736          * consistent with all the relevant state of the process
1737          * before the syscall.  We need to verify this.
1738          */
1739         mfctl   %cr30,%r1
1740         ldo     TASK_REGS(%r1), %r26            /* struct pt_regs *regs */
1741         reg_save %r26
1742 
1743 #ifdef CONFIG_64BIT
1744         ldo     -16(%r30),%r29                  /* Reference param save area */
1745 #endif
1746 
1747         BL      do_notify_resume,%r2
1748         ldi     1, %r25                         /* long in_syscall = 1 */
1749 
1750         mfctl   %cr30,%r1
1751         ldo     TASK_REGS(%r1), %r20            /* reload pt_regs */
1752         reg_restore %r20
1753 
1754         b,n     syscall_check_sig
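
        /* The flag-checking flow above, roughly in C (illustrative only):
         *
         *      while (ti->flags & _TIF_NEED_RESCHED)
         *              schedule();
         *      while (ti->flags & (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED)) {
         *              reg_save(regs);
         *              do_notify_resume(regs, 1);      // long in_syscall = 1
         *              reg_restore(regs);
         *      }
         *      // fall through to syscall_restore
         */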
1755 
1756 syscall_restore:
1757         mfctl   %cr30,%r1
1758 
1759         /* Are we being ptraced? */
1760         LDREG   TASK_TI_FLAGS(%r1),%r19
1761         ldi     _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
1762         and,COND(=)     %r19,%r2,%r0
1763         b,n     syscall_restore_rfi
1764 
1765         ldo     TASK_PT_FR31(%r1),%r19             /* reload fpregs */
1766         rest_fp %r19
1767 
1768         LDREG   TASK_PT_SAR(%r1),%r19              /* restore SAR */
1769         mtsar   %r19
1770 
1771         LDREG   TASK_PT_GR2(%r1),%r2               /* restore user rp */
1772         LDREG   TASK_PT_GR19(%r1),%r19
1773         LDREG   TASK_PT_GR20(%r1),%r20
1774         LDREG   TASK_PT_GR21(%r1),%r21
1775         LDREG   TASK_PT_GR22(%r1),%r22
1776         LDREG   TASK_PT_GR23(%r1),%r23
1777         LDREG   TASK_PT_GR24(%r1),%r24
1778         LDREG   TASK_PT_GR25(%r1),%r25
1779         LDREG   TASK_PT_GR26(%r1),%r26
1780         LDREG   TASK_PT_GR27(%r1),%r27     /* restore user dp */
1781         LDREG   TASK_PT_GR28(%r1),%r28     /* syscall return value */
1782         LDREG   TASK_PT_GR29(%r1),%r29
1783         LDREG   TASK_PT_GR31(%r1),%r31     /* restore syscall rp */
1784 
1785         /* NOTE: We use an rsm/ssm pair to make this operation atomic */
1786         LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
1787         rsm     PSW_SM_I, %r0
1788         copy    %r1,%r30                           /* Restore user sp */
1789         mfsp    %sr3,%r1                           /* Get user space id */
1790         mtsp    %r1,%sr7                           /* Restore sr7 */
1791         ssm     PSW_SM_I, %r0
1792 
1793         /* Set sr2 to zero for userspace syscalls to work. */
1794         mtsp    %r0,%sr2 
1795         mtsp    %r1,%sr4                           /* Restore sr4 */
1796         mtsp    %r1,%sr5                           /* Restore sr5 */
1797         mtsp    %r1,%sr6                           /* Restore sr6 */
1798 
1799         depi    PRIV_USER,31,2,%r31     /* ensure return to user mode. */
1800 
1801 #ifdef CONFIG_64BIT
1802         /* decide whether to reset the wide mode bit
1803          *
1804          * For a syscall, the W bit is stored in the lowest bit
1805          * of sp.  Extract it and reset W if it is zero */
1806         extrd,u,*<>     %r30,63,1,%r1
1807         rsm     PSW_SM_W, %r0
1808         /* now reset the lowest bit of sp if it was set */
1809         xor     %r30,%r1,%r30
1810 #endif
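
        /* In C terms (illustrative only), the wide-mode handling above:
         *
         *      w = sp & 1;              // W bit parked in lowest sp bit
         *      if (w == 0)
         *              psw &= ~PSW_W;   // rsm PSW_SM_W
         *      sp ^= w;                 // strip the marker bit
         */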
1811         be,n    0(%sr3,%r31)                       /* return to user space */
1812 
1813         /* We have to return via an RFI, so that PSW T and R bits can be set
1814          * appropriately.
1815          * This sets up pt_regs so we can return via intr_restore, which is not
1816          * the most efficient way of doing things, but it works.
1817          */
1818 syscall_restore_rfi:
1819         ldo     -1(%r0),%r2                        /* Set recovery cntr to -1 */
1820         mtctl   %r2,%cr0                           /*   for immediate trap */
1821         LDREG   TASK_PT_PSW(%r1),%r2               /* Get old PSW */
1822         ldi     0x0b,%r20                          /* Create new PSW */
1823         depi    -1,13,1,%r20                       /* C, Q, D, and I bits */
1824 
1825         /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1826          * set in thread_info.h and converted to PA bitmap
1827          * numbers in asm-offsets.c */
1828 
1829         /* if %r19 bit TIF_SINGLESTEP_PA_BIT is set, set %r20 bit 27 (R) */
1830         extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1831         depi    -1,27,1,%r20                       /* R bit */
1832 
1833         /* if %r19 bit TIF_BLOCKSTEP_PA_BIT is set, set %r20 bit 7 (T) */
1834         extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1835         depi    -1,7,1,%r20                        /* T bit */
1836 
1837         STREG   %r20,TASK_PT_PSW(%r1)
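
        /* With the recovery counter already set to -1 above, setting the
         * R bit makes the CPU trap after one instruction (single-step);
         * the T bit traps on every taken branch (block-step).
         */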
1838 
1839         /* Always store space registers, since sr3 can be changed (e.g. fork) */
1840 
1841         mfsp    %sr3,%r25
1842         STREG   %r25,TASK_PT_SR3(%r1)
1843         STREG   %r25,TASK_PT_SR4(%r1)
1844         STREG   %r25,TASK_PT_SR5(%r1)
1845         STREG   %r25,TASK_PT_SR6(%r1)
1846         STREG   %r25,TASK_PT_SR7(%r1)
1847         STREG   %r25,TASK_PT_IASQ0(%r1)
1848         STREG   %r25,TASK_PT_IASQ1(%r1)
1849 
1850         /* XXX W bit??? */
1851         /* Now if old D bit is clear, it means we didn't save all registers
1852          * on syscall entry, so do that now.  This only happens on TRACEME
1853          * calls, or if someone attached to us while we were on a syscall.
1854          * We could make this more efficient by not saving r3-r18, but
1855          * then we wouldn't be able to use the common intr_restore path.
1856          * It is only for traced processes anyway, so performance is not
1857          * an issue.
1858          */
1859         bb,<    %r2,30,pt_regs_ok                  /* Branch if D set */
1860         ldo     TASK_REGS(%r1),%r25
1861         reg_save %r25                              /* Save r3 to r18 */
1862 
1863         /* Save the current sr */
1864         mfsp    %sr0,%r2
1865         STREG   %r2,TASK_PT_SR0(%r1)
1866 
1867         /* Save the scratch sr */
1868         mfsp    %sr1,%r2
1869         STREG   %r2,TASK_PT_SR1(%r1)
1870 
1871         /* sr2 should be set to zero for userspace syscalls */
1872         STREG   %r0,TASK_PT_SR2(%r1)
1873 
1874         LDREG   TASK_PT_GR31(%r1),%r2
1875         depi    PRIV_USER,31,2,%r2      /* ensure return to user mode. */
1876         STREG   %r2,TASK_PT_IAOQ0(%r1)
1877         ldo     4(%r2),%r2
1878         STREG   %r2,TASK_PT_IAOQ1(%r1)
1879         b       intr_restore
1880         copy    %r25,%r16
1881 
1882 pt_regs_ok:
1883         LDREG   TASK_PT_IAOQ0(%r1),%r2
1884         depi    PRIV_USER,31,2,%r2      /* ensure return to user mode. */
1885         STREG   %r2,TASK_PT_IAOQ0(%r1)
1886         LDREG   TASK_PT_IAOQ1(%r1),%r2
1887         depi    PRIV_USER,31,2,%r2
1888         STREG   %r2,TASK_PT_IAOQ1(%r1)
1889         b       intr_restore
1890         copy    %r25,%r16
1891 
1892 syscall_do_resched:
1893         load32  syscall_check_resched,%r2 /* if resched, we start over again */
1894         load32  schedule,%r19
1895         bv      %r0(%r19)               /* jumps to schedule() */
1896 #ifdef CONFIG_64BIT
1897         ldo     -16(%r30),%r29          /* Reference param save area */
1898 #else
1899         nop
1900 #endif
1901 END(syscall_exit)
1902 
1903 
1904 #ifdef CONFIG_FUNCTION_TRACER
1905 
1906         .import ftrace_function_trampoline,code
1907         .align L1_CACHE_BYTES
1908 ENTRY_CFI(mcount, caller)
1909 _mcount:
1910         .export _mcount,data
1911         /*
1912          * The 64-bit mcount() function pointer needs 4 dwords, of which the
1913          * first two are free.  We exploit that here and put the 2 instructions
1914          * for calling mcount() and the 2 instructions for ftrace_stub() there,
1915          * so that everything fits in one L1 cache line.
1916          */
1917         ldi     0, %arg3
1918         b       ftrace_function_trampoline
1919         copy    %r3, %arg2      /* caller original %sp */
1920 ftrace_stub:
1921         .globl ftrace_stub
1922         .type  ftrace_stub, @function
1923 #ifdef CONFIG_64BIT
1924         bve     (%rp)
1925 #else
1926         bv      %r0(%rp)
1927 #endif
1928         nop
1929 #ifdef CONFIG_64BIT
1930         .dword mcount
1931         .dword 0 /* code in head.S puts value of global gp here */
1932 #endif
1933 ENDPROC_CFI(mcount)
1934 
1935 #ifdef CONFIG_DYNAMIC_FTRACE
1936 
1937 #ifdef CONFIG_64BIT
1938 #define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
1939 #else
1940 #define FTRACE_FRAME_SIZE FRAME_SIZE
1941 #endif
1942 ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
1943 ftrace_caller:
1944         .global ftrace_caller
1945 
1946         STREG   %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
1947         ldo     -FTRACE_FRAME_SIZE(%sp), %r3
1948         STREG   %rp, -RP_OFFSET(%r3)
1949 
1950         /* Offset 0 is already allocated for %r1 */
1951         STREG   %r23, 2*REG_SZ(%r3)
1952         STREG   %r24, 3*REG_SZ(%r3)
1953         STREG   %r25, 4*REG_SZ(%r3)
1954         STREG   %r26, 5*REG_SZ(%r3)
1955         STREG   %r28, 6*REG_SZ(%r3)
1956         STREG   %r29, 7*REG_SZ(%r3)
1957 #ifdef CONFIG_64BIT
1958         STREG   %r19, 8*REG_SZ(%r3)
1959         STREG   %r20, 9*REG_SZ(%r3)
1960         STREG   %r21, 10*REG_SZ(%r3)
1961         STREG   %r22, 11*REG_SZ(%r3)
1962         STREG   %r27, 12*REG_SZ(%r3)
1963         STREG   %r31, 13*REG_SZ(%r3)
1964         loadgp
1965         ldo     -16(%sp),%r29
1966 #endif
1967         LDREG   0(%r3), %r25
1968         copy    %rp, %r26
1969         ldo     -8(%r25), %r25
1970         ldi     0, %r23         /* no pt_regs */
1971         b,l     ftrace_function_trampoline, %rp
1972         copy    %r3, %r24
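
        /* Register use for the call above (assuming a C prototype along
         * the lines of trampoline(parent, self_addr, org_sp_gr3, fregs)):
         *   %r26/arg0 = %rp (return point into the traced code)
         *   %r25/arg1 = saved %r1 minus 8 (the traced function's address)
         *   %r24/arg2 = original %r3
         *   %r23/arg3 = 0 (no pt_regs in this non-regs variant)
         */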
1973 
1974         LDREG   -RP_OFFSET(%r3), %rp
1975         LDREG   2*REG_SZ(%r3), %r23
1976         LDREG   3*REG_SZ(%r3), %r24
1977         LDREG   4*REG_SZ(%r3), %r25
1978         LDREG   5*REG_SZ(%r3), %r26
1979         LDREG   6*REG_SZ(%r3), %r28
1980         LDREG   7*REG_SZ(%r3), %r29
1981 #ifdef CONFIG_64BIT
1982         LDREG   8*REG_SZ(%r3), %r19
1983         LDREG   9*REG_SZ(%r3), %r20
1984         LDREG   10*REG_SZ(%r3), %r21
1985         LDREG   11*REG_SZ(%r3), %r22
1986         LDREG   12*REG_SZ(%r3), %r27
1987         LDREG   13*REG_SZ(%r3), %r31
1988 #endif
1989         LDREG   1*REG_SZ(%r3), %r3
1990 
1991         LDREGM  -FTRACE_FRAME_SIZE(%sp), %r1
1992         /* Adjust return point to jump back to beginning of traced function */
1993         ldo     -4(%r1), %r1
1994         bv,n    (%r1)
1995 
1996 ENDPROC_CFI(ftrace_caller)
1997 
1998 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
1999 ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
2000         CALLS,SAVE_RP,SAVE_SP)
2001 ftrace_regs_caller:
2002         .global ftrace_regs_caller
2003 
2004         ldo     -FTRACE_FRAME_SIZE(%sp), %r1
2005         STREG   %rp, -RP_OFFSET(%r1)
2006 
2007         copy    %sp, %r1
2008         ldo     PT_SZ_ALGN(%sp), %sp
2009 
2010         STREG   %rp, PT_GR2(%r1)
2011         STREG   %r3, PT_GR3(%r1)
2012         STREG   %r4, PT_GR4(%r1)
2013         STREG   %r5, PT_GR5(%r1)
2014         STREG   %r6, PT_GR6(%r1)
2015         STREG   %r7, PT_GR7(%r1)
2016         STREG   %r8, PT_GR8(%r1)
2017         STREG   %r9, PT_GR9(%r1)
2018         STREG   %r10, PT_GR10(%r1)
2019         STREG   %r11, PT_GR11(%r1)
2020         STREG   %r12, PT_GR12(%r1)
2021         STREG   %r13, PT_GR13(%r1)
2022         STREG   %r14, PT_GR14(%r1)
2023         STREG   %r15, PT_GR15(%r1)
2024         STREG   %r16, PT_GR16(%r1)
2025         STREG   %r17, PT_GR17(%r1)
2026         STREG   %r18, PT_GR18(%r1)
2027         STREG   %r19, PT_GR19(%r1)
2028         STREG   %r20, PT_GR20(%r1)
2029         STREG   %r21, PT_GR21(%r1)
2030         STREG   %r22, PT_GR22(%r1)
2031         STREG   %r23, PT_GR23(%r1)
2032         STREG   %r24, PT_GR24(%r1)
2033         STREG   %r25, PT_GR25(%r1)
2034         STREG   %r26, PT_GR26(%r1)
2035         STREG   %r27, PT_GR27(%r1)
2036         STREG   %r28, PT_GR28(%r1)
2037         STREG   %r29, PT_GR29(%r1)
2038         STREG   %r30, PT_GR30(%r1)
2039         STREG   %r31, PT_GR31(%r1)
2040         mfctl   %cr11, %r26
2041         STREG   %r26, PT_SAR(%r1)
2042 
2043         copy    %rp, %r26
2044         LDREG   -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2045         ldo     -8(%r25), %r25
2046         ldo     -FTRACE_FRAME_SIZE(%r1), %arg2
2047         b,l     ftrace_function_trampoline, %rp
2048         copy    %r1, %arg3 /* struct pt_regs */
2049 
2050         ldo     -PT_SZ_ALGN(%sp), %r1
2051 
2052         LDREG   PT_SAR(%r1), %rp
2053         mtctl   %rp, %cr11
2054 
2055         LDREG   PT_GR2(%r1), %rp
2056         LDREG   PT_GR3(%r1), %r3
2057         LDREG   PT_GR4(%r1), %r4
2058         LDREG   PT_GR5(%r1), %r5
2059         LDREG   PT_GR6(%r1), %r6
2060         LDREG   PT_GR7(%r1), %r7
2061         LDREG   PT_GR8(%r1), %r8
2062         LDREG   PT_GR9(%r1), %r9
2063         LDREG   PT_GR10(%r1),%r10
2064         LDREG   PT_GR11(%r1),%r11
2065         LDREG   PT_GR12(%r1),%r12
2066         LDREG   PT_GR13(%r1),%r13
2067         LDREG   PT_GR14(%r1),%r14
2068         LDREG   PT_GR15(%r1),%r15
2069         LDREG   PT_GR16(%r1),%r16
2070         LDREG   PT_GR17(%r1),%r17
2071         LDREG   PT_GR18(%r1),%r18
2072         LDREG   PT_GR19(%r1),%r19
2073         LDREG   PT_GR20(%r1),%r20
2074         LDREG   PT_GR21(%r1),%r21
2075         LDREG   PT_GR22(%r1),%r22
2076         LDREG   PT_GR23(%r1),%r23
2077         LDREG   PT_GR24(%r1),%r24
2078         LDREG   PT_GR25(%r1),%r25
2079         LDREG   PT_GR26(%r1),%r26
2080         LDREG   PT_GR27(%r1),%r27
2081         LDREG   PT_GR28(%r1),%r28
2082         LDREG   PT_GR29(%r1),%r29
2083         LDREG   PT_GR30(%r1),%r30
2084         LDREG   PT_GR31(%r1),%r31
2085 
2086         ldo     -PT_SZ_ALGN(%sp), %sp
2087         LDREGM  -FTRACE_FRAME_SIZE(%sp), %r1
2088         /* Adjust return point to jump back to beginning of traced function */
2089         ldo     -4(%r1), %r1
2090         bv,n    (%r1)
2091 
2092 ENDPROC_CFI(ftrace_regs_caller)
2093 
2094 #endif
2095 #endif
2096 
2097 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2098         .align 8
2099 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2100         .export parisc_return_to_handler,data
2101 parisc_return_to_handler:
2102         copy %r3,%r1
2103         STREG %r0,-RP_OFFSET(%sp)       /* store 0 as %rp */
2104         copy %sp,%r3
2105         STREGM %r1,FRAME_SIZE(%sp)
2106         STREG %ret0,8(%r3)
2107         STREG %ret1,16(%r3)
2108 
2109 #ifdef CONFIG_64BIT
2110         loadgp
2111 #endif
2112 
2113         /* call ftrace_return_to_handler(0) */
2114         .import ftrace_return_to_handler,code
2115         load32 ftrace_return_to_handler,%ret0
2116         load32 .Lftrace_ret,%r2
2117 #ifdef CONFIG_64BIT
2118         ldo -16(%sp),%ret1              /* Reference param save area */
2119         bve     (%ret0)
2120 #else
2121         bv      %r0(%ret0)
2122 #endif
2123         ldi 0,%r26
2124 .Lftrace_ret:
2125         copy %ret0,%rp
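
        /* ftrace_return_to_handler() returns the original return address
         * that the graph tracer had replaced; it becomes the new %rp so
         * the final branch below returns to the real caller.
         */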
2126 
2127         /* restore original return values */
2128         LDREG 8(%r3),%ret0
2129         LDREG 16(%r3),%ret1
2130 
2131         /* return from function */
2132 #ifdef CONFIG_64BIT
2133         bve     (%rp)
2134 #else
2135         bv      %r0(%rp)
2136 #endif
2137         LDREGM -FRAME_SIZE(%sp),%r3
2138 ENDPROC_CFI(return_to_handler)
2139 
2140 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2141 
2142 #endif  /* CONFIG_FUNCTION_TRACER */
2143 
2144 #ifdef CONFIG_IRQSTACKS
2145 /* void call_on_stack(unsigned long param1, void *func,
2146                       unsigned long new_stack) */
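/* An illustrative C caller (handler and stack_top are hypothetical
 * names), e.g. for running an interrupt handler on a separate stack:
 *
 *      call_on_stack((unsigned long)arg, (void *)handler,
 *                    (unsigned long)stack_top);
 */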
2147 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2148 ENTRY(_call_on_stack)
2149         copy    %sp, %r1
2150 
2151         /* Regarding the HPPA calling conventions for function pointers,
2152            we assume the PIC register is not changed across the call.  For
2153            CONFIG_64BIT, the argument pointer is left to point at the
2154            argument region allocated for the call to call_on_stack. */
2155 
2156         /* Switch to new stack.  We allocate two frames.  */
2157         ldo     2*FRAME_SIZE(%arg2), %sp
2158 # ifdef CONFIG_64BIT
2159         /* Save previous stack pointer and return pointer in frame marker */
2160         STREG   %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2161         /* Calls always use function descriptor */
2162         LDREG   16(%arg1), %arg1
2163         bve,l   (%arg1), %rp
2164         STREG   %r1, -FRAME_SIZE-REG_SZ(%sp)
2165         LDREG   -FRAME_SIZE-RP_OFFSET(%sp), %rp
2166         bve     (%rp)
2167         LDREG   -FRAME_SIZE-REG_SZ(%sp), %sp
2168 # else
2169         /* Save previous stack pointer and return pointer in frame marker */
2170         STREG   %r1, -FRAME_SIZE-REG_SZ(%sp)
2171         STREG   %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2172         /* Calls use function descriptor if PLABEL bit is set */
2173         bb,>=,n %arg1, 30, 1f
2174         depwi   0,31,2, %arg1
2175         LDREG   0(%arg1), %arg1
2176 1:
2177         be,l    0(%sr4,%arg1), %sr0, %r31
2178         copy    %r31, %rp
2179         LDREG   -FRAME_SIZE-RP_OFFSET(%sp), %rp
2180         bv      (%rp)
2181         LDREG   -FRAME_SIZE-REG_SZ(%sp), %sp
2182 # endif /* CONFIG_64BIT */
2183 ENDPROC_CFI(call_on_stack)
2184 #endif /* CONFIG_IRQSTACKS */
2185 
2186 ENTRY_CFI(get_register)
2187         /*
2188          * get_register is used by the non-access TLB miss handlers to
2189          * copy the value of the general register specified in r8 into
2190          * r1. This routine can't be used for shadowed registers, since
2191          * the rfir will restore the original value. So, for the shadowed
2192          * registers we put a -1 into r1 to indicate that the register
2193          * should not be used (the register being copied could also hold
2194          * -1, but that is OK; it just means that we will have to take
2195          * the slow path instead).
2196          */
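        /*
         * The blr below is a computed goto: it branches to the
         * instruction 8 + 8*%r8 bytes ahead, i.e. into the table of
         * two-instruction (bv + delay-slot copy) entries that follows,
         * one entry per general register; roughly a C "switch (r8)"
         * with 32 cases.
         */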
2197         blr     %r8,%r0
2198         nop
2199         bv      %r0(%r25)    /* r0 */
2200         copy    %r0,%r1
2201         bv      %r0(%r25)    /* r1 - shadowed */
2202         ldi     -1,%r1
2203         bv      %r0(%r25)    /* r2 */
2204         copy    %r2,%r1
2205         bv      %r0(%r25)    /* r3 */
2206         copy    %r3,%r1
2207         bv      %r0(%r25)    /* r4 */
2208         copy    %r4,%r1
2209         bv      %r0(%r25)    /* r5 */
2210         copy    %r5,%r1
2211         bv      %r0(%r25)    /* r6 */
2212         copy    %r6,%r1
2213         bv      %r0(%r25)    /* r7 */
2214         copy    %r7,%r1
2215         bv      %r0(%r25)    /* r8 - shadowed */
2216         ldi     -1,%r1
2217         bv      %r0(%r25)    /* r9 - shadowed */
2218         ldi     -1,%r1
2219         bv      %r0(%r25)    /* r10 */
2220         copy    %r10,%r1
2221         bv      %r0(%r25)    /* r11 */
2222         copy    %r11,%r1
2223         bv      %r0(%r25)    /* r12 */
2224         copy    %r12,%r1
2225         bv      %r0(%r25)    /* r13 */
2226         copy    %r13,%r1
2227         bv      %r0(%r25)    /* r14 */
2228         copy    %r14,%r1
2229         bv      %r0(%r25)    /* r15 */
2230         copy    %r15,%r1
2231         bv      %r0(%r25)    /* r16 - shadowed */
2232         ldi     -1,%r1
2233         bv      %r0(%r25)    /* r17 - shadowed */
2234         ldi     -1,%r1
2235         bv      %r0(%r25)    /* r18 */
2236         copy    %r18,%r1
2237         bv      %r0(%r25)    /* r19 */
2238         copy    %r19,%r1
2239         bv      %r0(%r25)    /* r20 */
2240         copy    %r20,%r1
2241         bv      %r0(%r25)    /* r21 */
2242         copy    %r21,%r1
2243         bv      %r0(%r25)    /* r22 */
2244         copy    %r22,%r1
2245         bv      %r0(%r25)    /* r23 */
2246         copy    %r23,%r1
2247         bv      %r0(%r25)    /* r24 - shadowed */
2248         ldi     -1,%r1
2249         bv      %r0(%r25)    /* r25 - shadowed */
2250         ldi     -1,%r1
2251         bv      %r0(%r25)    /* r26 */
2252         copy    %r26,%r1
2253         bv      %r0(%r25)    /* r27 */
2254         copy    %r27,%r1
2255         bv      %r0(%r25)    /* r28 */
2256         copy    %r28,%r1
2257         bv      %r0(%r25)    /* r29 */
2258         copy    %r29,%r1
2259         bv      %r0(%r25)    /* r30 */
2260         copy    %r30,%r1
2261         bv      %r0(%r25)    /* r31 */
2262         copy    %r31,%r1
2263 ENDPROC_CFI(get_register)
2264 
2265 
2266 ENTRY_CFI(set_register)
2267         /*
2268          * set_register is used by the non-access TLB miss handlers to
2269          * copy the value of r1 into the general register specified in
2270          * r8.
2271          */
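        /*
         * Dispatch works exactly as in get_register above: blr indexes
         * the 8-byte entries below by %r8, each copying r1 into one
         * target register.
         */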
2272         blr     %r8,%r0
2273         nop
2274         bv      %r0(%r25)    /* r0 (silly, but it is a placeholder) */
2275         copy    %r1,%r0
2276         bv      %r0(%r25)    /* r1 */
2277         copy    %r1,%r1
2278         bv      %r0(%r25)    /* r2 */
2279         copy    %r1,%r2
2280         bv      %r0(%r25)    /* r3 */
2281         copy    %r1,%r3
2282         bv      %r0(%r25)    /* r4 */
2283         copy    %r1,%r4
2284         bv      %r0(%r25)    /* r5 */
2285         copy    %r1,%r5
2286         bv      %r0(%r25)    /* r6 */
2287         copy    %r1,%r6
2288         bv      %r0(%r25)    /* r7 */
2289         copy    %r1,%r7
2290         bv      %r0(%r25)    /* r8 */
2291         copy    %r1,%r8
2292         bv      %r0(%r25)    /* r9 */
2293         copy    %r1,%r9
2294         bv      %r0(%r25)    /* r10 */
2295         copy    %r1,%r10
2296         bv      %r0(%r25)    /* r11 */
2297         copy    %r1,%r11
2298         bv      %r0(%r25)    /* r12 */
2299         copy    %r1,%r12
2300         bv      %r0(%r25)    /* r13 */
2301         copy    %r1,%r13
2302         bv      %r0(%r25)    /* r14 */
2303         copy    %r1,%r14
2304         bv      %r0(%r25)    /* r15 */
2305         copy    %r1,%r15
2306         bv      %r0(%r25)    /* r16 */
2307         copy    %r1,%r16
2308         bv      %r0(%r25)    /* r17 */
2309         copy    %r1,%r17
2310         bv      %r0(%r25)    /* r18 */
2311         copy    %r1,%r18
2312         bv      %r0(%r25)    /* r19 */
2313         copy    %r1,%r19
2314         bv      %r0(%r25)    /* r20 */
2315         copy    %r1,%r20
2316         bv      %r0(%r25)    /* r21 */
2317         copy    %r1,%r21
2318         bv      %r0(%r25)    /* r22 */
2319         copy    %r1,%r22
2320         bv      %r0(%r25)    /* r23 */
2321         copy    %r1,%r23
2322         bv      %r0(%r25)    /* r24 */
2323         copy    %r1,%r24
2324         bv      %r0(%r25)    /* r25 */
2325         copy    %r1,%r25
2326         bv      %r0(%r25)    /* r26 */
2327         copy    %r1,%r26
2328         bv      %r0(%r25)    /* r27 */
2329         copy    %r1,%r27
2330         bv      %r0(%r25)    /* r28 */
2331         copy    %r1,%r28
2332         bv      %r0(%r25)    /* r29 */
2333         copy    %r1,%r29
2334         bv      %r0(%r25)    /* r30 */
2335         copy    %r1,%r30
2336         bv      %r0(%r25)    /* r31 */
2337         copy    %r1,%r31
2338 ENDPROC_CFI(set_register)
2339 
