TOMOYO Linux Cross Reference
Linux/arch/powerpc/kernel/switch.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <linux/objtool.h>
#include <asm/asm-offsets.h>
#include <asm/code-patching-asm.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/kup.h>
#include <asm/thread_info.h>

.section ".text","ax",@progbits

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Cancel all explicit user streams: they will have no use after the
 * context switch, and leaving them in place would stop the HW from
 * creating streams itself.
 */
#define STOP_STREAMS            \
        DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
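
/*
 * For reference, DCBT_BOOK3S_STOP_ALL_STREAM_IDS (asm/ppc_asm.h) expands
 * roughly to the following, a dcbt using the "all streams stop" TH
 * encoding (sketch only; details may vary by kernel version):
 *
 *      lis     r6,0x60000000@h
 *      dcbt    0,r6,0b01010    // TH=0b01010: stop all prefetch streams
 */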

#define FLUSH_COUNT_CACHE       \
1:      nop;                    \
        patch_site 1b, patch__call_flush_branch_caches1; \
1:      nop;                    \
        patch_site 1b, patch__call_flush_branch_caches2; \
1:      nop;                    \
        patch_site 1b, patch__call_flush_branch_caches3
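
/*
 * How the patch sites above work (illustrative, not assembled code): each
 * "1: nop" is a placeholder that boot-time code patching can replace with
 * a call once the required mitigations are known, roughly:
 *
 *      1:      nop                             // as assembled
 *      1:      bl      flush_branch_caches     // after runtime patching
 *
 * patch_site records the address of the preceding label under the named
 * patch__* symbol so the patching code can find the instruction.
 */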

.macro nops number
        .rept \number
        nop
        .endr
.endm

.balign 32
.global flush_branch_caches
flush_branch_caches:
        /* Save LR into r9 */
        mflr    r9

        // Flush the link stack
        .rept 64
        ANNOTATE_INTRA_FUNCTION_CALL
        bl      .+4
        .endr
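        /*
         * Each "bl .+4" branches to the immediately following instruction,
         * pushing a new entry onto the hardware link stack (the return
         * address predictor); 64 iterations are assumed to be enough to
         * overwrite every stale entry on the affected CPUs.
         */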
        b       1f
        nops    6

        .balign 32
        /* Restore LR */
1:      mtlr    r9

        // If we're just flushing the link stack, return here
3:      nop
        patch_site 3b, patch__flush_link_stack_return

        li      r9,0x7fff
        mtctr   r9

        PPC_BCCTR_FLUSH

2:      nop
        patch_site 2b, patch__flush_count_cache_return

        nops    3

        .rept 278
        .balign 32
        PPC_BCCTR_FLUSH
        nops    7
        .endr

        blr

#ifdef CONFIG_PPC_64S_HASH_MMU
.balign 32
/*
 * New stack pointer in r8, old stack pointer in r1, must not clobber r3
 */
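/*
 * Sketch of the ESID math below (comment only): with 256MB segments the
 * ESID is the address with the low 28 bits cleared, in C terms roughly
 *
 *      esid = sp & ~((1UL << 28) - 1);         // clrrdi rX,rY,28
 *
 * and with 1T segments the low 40 bits are cleared instead.
 */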
pin_stack_slb:
BEGIN_FTR_SECTION
        clrrdi  r6,r8,28        /* get its ESID */
        clrrdi  r9,r1,28        /* get current sp ESID */
FTR_SECTION_ELSE
        clrrdi  r6,r8,40        /* get its 1T ESID */
        clrrdi  r9,r1,40        /* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
        clrldi. r0,r6,2         /* is new ESID c000000000000000? */
        cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
        cror    eq,4*cr1+eq,eq
        beq     2f              /* if yes, don't slbie it */

        /* Bolt in the new stack SLB entry */
        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
        oris    r0,r6,(SLB_ESID_V)@h
        ori     r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
        li      r9,MMU_SEGSIZE_1T       /* insert B field */
        oris    r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
        rldimi  r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

        /* Update the last bolted SLB.  No write barriers are needed
         * here, provided we only update the current CPU's SLB shadow
         * buffer.
         */
        ld      r9,PACA_SLBSHADOWPTR(r13)
        li      r12,0
        std     r12,SLBSHADOW_STACKESID(r9)     /* Clear ESID */
        li      r12,SLBSHADOW_STACKVSID
        STDX_BE r7,r12,r9                       /* Save VSID */
        li      r12,SLBSHADOW_STACKESID
        STDX_BE r0,r12,r9                       /* Save ESID */
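
        /*
         * Note the update order: the ESID is zeroed first so the shadow
         * entry is invalid while it is being rewritten, the new VSID is
         * stored next, and only then is the new (valid) ESID stored.
         */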

        /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
         * we have 1TB segments, the only CPUs known to have the errata
         * only support less than 1TB of system memory and we'll never
         * actually hit this code path.
         */

        isync
        slbie   r6
BEGIN_FTR_SECTION
        slbie   r6              /* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        slbmte  r7,r0
        isync
2:      blr
        .size pin_stack_slb,.-pin_stack_slb
#endif /* CONFIG_PPC_64S_HASH_MMU */

#else
#define STOP_STREAMS
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * do_switch_32/64 have the same calling convention as _switch, i.e., r3,r4
 * are prev and next thread_struct *, and return prev task_struct * in r3.
 *
 * This switches the stack, current, and does other task switch housekeeping.
 */
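/*
 * In C terms, the convention above is roughly (illustrative prototype
 * only):
 *
 *      struct task_struct *_switch(struct thread_struct *prev,
 *                                  struct thread_struct *next);
 */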
.macro do_switch_32
        tophys(r0,r4)
        mtspr   SPRN_SPRG_THREAD,r0     /* Update current THREAD phys addr */
        lwz     r1,KSP(r4)      /* Load new stack pointer */

        /* save the old current 'last' for return value */
        mr      r3,r2
        addi    r2,r4,-THREAD   /* Update current */
.endm
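
/*
 * THREAD is the byte offset of thread_struct within task_struct, so
 * "addi rX,rY,-THREAD" is the assembly equivalent of (sketch):
 *
 *      task = container_of(thread, struct task_struct, thread);
 *
 * On 32-bit, 'current' is kept in r2, hence the mr/addi pair above.
 */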

.macro do_switch_64
        ld      r8,KSP(r4)      /* Load new stack pointer */

        kuap_check_amr r9, r10

        FLUSH_COUNT_CACHE       /* Clobbers r9, ctr */

        STOP_STREAMS            /* Clobbers r6 */

        addi    r3,r3,-THREAD   /* old thread -> task_struct for return value */
        addi    r6,r4,-THREAD   /* new thread -> task_struct */
        std     r6,PACACURRENT(r13)     /* Set new task_struct to 'current' */
#if defined(CONFIG_STACKPROTECTOR)
        ld      r6, TASK_CANARY(r6)
        std     r6, PACA_CANARY(r13)
#endif
        /* Set new PACAKSAVE */
        clrrdi  r7,r8,THREAD_SHIFT      /* base of new stack */
        addi    r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
        std     r7,PACAKSAVE(r13)

#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
        bl      pin_stack_slb
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif
        /*
         * PMU interrupts in radix may come in here. They will use r1, not
         * PACAKSAVE, so this stack switch will not cause a problem. They
         * will store to the process stack, which may then be migrated to
         * another CPU. However, the rq lock release on this CPU, paired
         * with the rq lock acquire on the new CPU before the stack becomes
         * active there, will order those stores.
         */
        mr      r1,r8           /* start using new stack pointer */
.endm

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c.
 *
 * Note: this uses SWITCH_FRAME_SIZE rather than USER_INT_FRAME_SIZE
 * because we don't need to leave the redzone ABI gap at the top of
 * the kernel stack.
 */
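/*
 * A sketch of the call site (cf. __switch_to() in
 * arch/powerpc/kernel/process.c):
 *
 *      last = _switch(old_thread, new_thread);
 *
 * where 'last' is the task that was running before the switch.
 */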
_GLOBAL(_switch)
        PPC_CREATE_STACK_FRAME(SWITCH_FRAME_SIZE)
        PPC_STL         r1,KSP(r3)      /* Set old stack pointer */
        SAVE_NVGPRS(r1)                 /* volatiles are caller-saved -- Cort */
        PPC_STL         r0,_NIP(r1)     /* Return to switch caller */
        mfcr            r0
        stw             r0,_CCR(r1)

        /*
         * On SMP kernels, care must be taken because a task may be
         * scheduled off CPUx and on to CPUy. Memory ordering must be
         * considered.
         *
         * Cacheable stores on CPUx will be visible when the task is
         * scheduled on CPUy by virtue of the core scheduler barriers
         * (see "Notes on Program-Order guarantees on SMP systems." in
         * kernel/sched/core.c).
         *
         * Uncacheable stores in the case of involuntary preemption must
         * be taken care of. The smp_mb__after_spinlock() in __schedule()
         * is implemented as hwsync on powerpc, which orders MMIO too. So
         * long as there is an hwsync in the context switch path, it will
         * be executed on the source CPU after the task has performed
         * all MMIO ops on that CPU, and on the destination CPU before the
         * task performs any MMIO ops there.
         */
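
        /*
         * Sketch of that ordering (comment only, hypothetical timeline):
         *
         *      CPUx: task A: writel(...); schedule() -> hwsync -> switch out
         *      CPUy: rq lock acquire -> switch in -> task A: writel(...)
         *
         * The hwsync on CPUx executes after all of A's MMIO there, and the
         * rq lock release/acquire pair orders it before any MMIO that A
         * performs on CPUy.
         */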

        /*
         * The kernel context switch path must contain a spin_lock,
         * which contains larx/stcx, which will clear any reservation
         * held by the task being switched out.
         */
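
        /*
         * Illustrative larx/stcx lock sequence (comment only): the lwarx
         * replaces any reservation the outgoing task still held, and the
         * stwcx. then clears it:
         *
         *      1:      lwarx   r10,0,r3        // load lock word, reserve
         *              cmpwi   r10,0           // already held?
         *              bne-    1b
         *              stwcx.  r9,0,r3         // conditional store clears
         *              bne-    1b              // the reservation
         *              isync                   // acquire barrier
         */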

#ifdef CONFIG_PPC32
        do_switch_32
#else
        do_switch_64
#endif

        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        REST_NVGPRS(r1)         /* volatiles are destroyed -- Cort */
        PPC_LL  r0,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r0
        addi    r1,r1,SWITCH_FRAME_SIZE
        blr
