TOMOYO Linux Cross Reference
Linux/arch/riscv/kernel/entry.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

        .section .irqentry.text, "ax"

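/*
 * new_vmalloc_check runs before the trap frame (pt_regs) has been set up, so
 * the temporaries it needs (a0-a2) are spilled into per-thread slots in
 * thread_info (TASK_TI_A0..TASK_TI_A2) via tp and reloaded on every exit path.
 */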
.macro new_vmalloc_check
        REG_S   a0, TASK_TI_A0(tp)
        csrr    a0, CSR_CAUSE
        /* Exclude IRQs */
        blt     a0, zero, _new_vmalloc_restore_context_a0

        REG_S   a1, TASK_TI_A1(tp)
        /* Only check new_vmalloc if we are in a page/protection fault */
        li      a1, EXC_LOAD_PAGE_FAULT
        beq     a0, a1, _new_vmalloc_kernel_address
        li      a1, EXC_STORE_PAGE_FAULT
        beq     a0, a1, _new_vmalloc_kernel_address
        li      a1, EXC_INST_PAGE_FAULT
        bne     a0, a1, _new_vmalloc_restore_context_a1

_new_vmalloc_kernel_address:
        /* Is it a kernel address? */
        csrr    a0, CSR_TVAL
        bge     a0, zero, _new_vmalloc_restore_context_a1

        /* Check if a new vmalloc mapping appeared that could explain the trap */
        REG_S   a2, TASK_TI_A2(tp)
        /*
         * Computes:
         * a0 = &new_vmalloc[BIT_WORD(cpu)]
         * a1 = BIT_MASK(cpu)
         */
        REG_L   a2, TASK_TI_CPU(tp)
        /*
         * Compute the new_vmalloc element position:
         * (cpu / 64) * 8 = (cpu >> 6) << 3
         */
        srli    a1, a2, 6
        slli    a1, a1, 3
        la      a0, new_vmalloc
        add     a0, a0, a1
        /*
         * Compute the bit position in the new_vmalloc element:
         * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - ((cpu >> 6) << 6)
         *         = cpu - (((cpu >> 6) << 3) << 3)
         * a1 already holds (cpu >> 6) << 3, so shift it left by 3 more.
         */
        slli    a1, a1, 3
        sub     a1, a2, a1
        /* Compute the "get mask": 1 << bit_pos */
        li      a2, 1
        sll     a1, a2, a1
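        /*
         * For example, with cpu = 70: a0 ends up pointing at byte offset
         * (70 >> 6) << 3 = 8, i.e. new_vmalloc[1], and a1 holds the mask
         * 1 << (70 - 64) = 1 << 6.
         */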

        /* Check the value of new_vmalloc for this cpu */
        REG_L   a2, 0(a0)
        and     a2, a2, a1
        beq     a2, zero, _new_vmalloc_restore_context

        /* Atomically reset the current cpu bit in new_vmalloc */
        amoxor.d        a0, a1, (a0)

        /* Only emit a sfence.vma if the uarch caches invalid entries */
        ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)

        REG_L   a0, TASK_TI_A0(tp)
        REG_L   a1, TASK_TI_A1(tp)
        REG_L   a2, TASK_TI_A2(tp)
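        /*
         * The mapping that caused the fault should now be visible, so return
         * straight to the faulting instruction and let it retry.  The trap
         * came from kernel mode, so sscratch is restored to 0 first.
         */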
        csrw    CSR_SCRATCH, x0
        sret

_new_vmalloc_restore_context:
        REG_L   a2, TASK_TI_A2(tp)
_new_vmalloc_restore_context_a1:
        REG_L   a1, TASK_TI_A1(tp)
_new_vmalloc_restore_context_a0:
        REG_L   a0, TASK_TI_A0(tp)
.endm


SYM_CODE_START(handle_exception)
        /*
         * If coming from userspace, preserve the user thread pointer and load
         * the kernel thread pointer.  If we came from the kernel, the scratch
         * register will contain 0, and we should continue on the current TP.
         */
        csrrw tp, CSR_SCRATCH, tp
        bnez tp, .Lsave_context

.Lrestore_kernel_tpsp:
        csrr tp, CSR_SCRATCH

#ifdef CONFIG_64BIT
        /*
         * The RISC-V kernel does not eagerly emit a sfence.vma after each
         * new vmalloc mapping, which may result in exceptions:
         * - if the uarch caches invalid entries, the new mapping would not be
         *   observed by the page table walker and an invalidation is needed.
         * - if the uarch does not cache invalid entries, a reordered access
         *   could "miss" the new mapping and trap: in that case, we only need
         *   to retry the access; no sfence.vma is required.
         */
        new_vmalloc_check
#endif

        REG_S sp, TASK_TI_KERNEL_SP(tp)

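        /*
         * The VMAP'd kernel stack is sized and aligned so that bit
         * THREAD_SHIFT of a valid kernel stack pointer is zero.  If that bit
         * is set once room for pt_regs has been reserved, the frame would run
         * off the stack, so divert to the overflow handler.
         */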
#ifdef CONFIG_VMAP_STACK
        addi sp, sp, -(PT_SIZE_ON_STACK)
        srli sp, sp, THREAD_SHIFT
        andi sp, sp, 0x1
        bnez sp, handle_kernel_stack_overflow
        REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
        REG_S sp, TASK_TI_USER_SP(tp)
        REG_L sp, TASK_TI_KERNEL_SP(tp)
        addi sp, sp, -(PT_SIZE_ON_STACK)
        REG_S x1,  PT_RA(sp)
        REG_S x3,  PT_GP(sp)
        REG_S x5,  PT_T0(sp)
        save_from_x6_to_x31

        /*
         * Disable user-mode memory access as it should only be set in the
         * actual user copy routines.
         *
         * Disable the FPU/Vector to detect illegal usage of floating point
         * or vector in kernel space.
         */
        li t0, SR_SUM | SR_FS_VS

        REG_L s0, TASK_TI_USER_SP(tp)
        csrrc s1, CSR_STATUS, t0
        csrr s2, CSR_EPC
        csrr s3, CSR_TVAL
        csrr s4, CSR_CAUSE
        csrr s5, CSR_SCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_STATUS(sp)
        REG_S s2, PT_EPC(sp)
        REG_S s3, PT_BADADDR(sp)
        REG_S s4, PT_CAUSE(sp)
        REG_S s5, PT_TP(sp)
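        /*
         * csrrc returns the STATUS value from before the bits were cleared,
         * so the unmodified status is what lands in PT_STATUS.  CSR_SCRATCH
         * still holds whatever tp contained when the trap was taken (swapped
         * in at the top of handle_exception), which is why it is saved as
         * PT_TP.
         */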

        /*
         * Set the scratch register to 0, so that if a recursive exception
         * occurs, the exception vector knows it came from the kernel.
         */
        csrw CSR_SCRATCH, x0

        /* Load the global pointer */
        load_global_pointer

        /* Load the kernel shadow call stack pointer if coming from userspace */
        scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
        move a0, sp
        call riscv_v_context_nesting_start
#endif
        move a0, sp /* pt_regs */

        /*
         * The MSB of cause differentiates between interrupts and exceptions:
         * scause is negative (interrupt bit set) for interrupts, so a signed
         * compare against zero is sufficient.
         */
        bge s4, zero, 1f

        /* Handle interrupts */
        call do_irq
        j ret_from_exception
1:
        /* Handle other exceptions */
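        /*
         * Scale the exception code by the pointer size (1 << RISCV_LGPTR) to
         * index the table of handler pointers below.
         */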
        slli t0, s4, RISCV_LGPTR
        la t1, excp_vect_table
        la t2, excp_vect_table_end
        add t0, t1, t0
        /* Check if exception code lies within bounds */
        bgeu t0, t2, 3f
        REG_L t1, 0(t0)
2:      jalr t1
        j ret_from_exception
3:
        la t1, do_trap_unknown
        j 2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)

/*
 * ret_from_exception must be called with interrupts disabled.  Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
        REG_L s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
        /* the MPP value is too large to be used as an immediate arg for addi */
        li t0, SR_MPP
        and s0, s0, t0
#else
        andi s0, s0, SR_SPP
#endif
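        /*
         * A non-zero SPP/MPP field means we are returning to kernel mode;
         * in that case the user-return bookkeeping below (kernel stack
         * pointer, shadow call stack and sscratch updates) is skipped.
         */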
        bnez s0, 1f

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
        call    stackleak_erase_on_task_stack
#endif

        /* Save unwound kernel stack pointer in thread_info */
        addi s0, sp, PT_SIZE_ON_STACK
        REG_S s0, TASK_TI_KERNEL_SP(tp)

        /* Save the kernel shadow call stack pointer */
        scs_save_current

        /*
         * Save TP into the scratch register, so we can find the kernel data
         * structures again.
         */
        csrw CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
        move a0, sp
        call riscv_v_context_nesting_end
#endif
        REG_L a0, PT_STATUS(sp)
        /*
         * The current load reservation is effectively part of the processor's
         * state, in the sense that load reservations cannot be shared between
         * different hart contexts.  We can't actually save and restore a load
         * reservation, so instead here we clear any existing reservation --
         * it's always legal for implementations to clear load reservations at
         * any point (as long as the forward progress guarantee is kept, but
         * we'll ignore that here).
         *
         * Dangling load reservations can be the result of taking a trap in the
         * middle of an LR/SC sequence, but can also be the result of a taken
         * forward branch around an SC -- which is how we implement CAS.  As a
         * result we need to clear reservations between the last CAS and the
         * jump back to the new context.  While it is unlikely the store
         * completes, implementations are allowed to expand reservations to be
         * arbitrarily large.
         */
        REG_L  a2, PT_EPC(sp)
        REG_SC x0, a2, PT_EPC(sp)
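        /*
         * Whether or not the store-conditional above succeeds, it clears this
         * hart's reservation.  The value written is the epc that was just
         * loaded, so memory is unchanged either way, and the success code is
         * discarded into x0.
         */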

        csrw CSR_STATUS, a0
        csrw CSR_EPC, a2

        REG_L x1,  PT_RA(sp)
        REG_L x3,  PT_GP(sp)
        REG_L x4,  PT_TP(sp)
        REG_L x5,  PT_T0(sp)
        restore_from_x6_to_x31

        REG_L x2,  PT_SP(sp)
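        /* sp (x2) is restored last: it is the base register for the loads above */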

#ifdef CONFIG_RISCV_M_MODE
        mret
#else
        sret
#endif
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
        /* we reach here from kernel context, sscratch must be 0 */
        csrrw x31, CSR_SCRATCH, x31
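        /*
         * Stash x31 in sscratch (known to be 0 here) so that x31 can be used
         * as a temporary while computing the per-CPU overflow stack address;
         * the swap is undone below.
         */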
        asm_per_cpu sp, overflow_stack, x31
        li x31, OVERFLOW_STACK_SIZE
        add sp, sp, x31
        /* swap a zeroed x31 back into sscratch and restore the original x31 */
        xor x31, x31, x31
        csrrw x31, CSR_SCRATCH, x31

        addi sp, sp, -(PT_SIZE_ON_STACK)

        /* Save the context to the overflow stack */
        REG_S x1,  PT_RA(sp)
        REG_S x3,  PT_GP(sp)
        REG_S x5,  PT_T0(sp)
        save_from_x6_to_x31

        REG_L s0, TASK_TI_KERNEL_SP(tp)
        csrr s1, CSR_STATUS
        csrr s2, CSR_EPC
        csrr s3, CSR_TVAL
        csrr s4, CSR_CAUSE
        csrr s5, CSR_SCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_STATUS(sp)
        REG_S s2, PT_EPC(sp)
        REG_S s3, PT_BADADDR(sp)
        REG_S s4, PT_CAUSE(sp)
        REG_S s5, PT_TP(sp)
        move a0, sp
        tail handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork)
        call schedule_tail
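        /*
         * copy_thread() seeds s0 with the kernel-thread function (or 0 for a
         * user task) and s1 with its argument; both were reloaded from
         * thread.s[] by __switch_to before we arrived here.
         */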
        beqz s0, 1f     /* not from kernel thread */
        /* Call fn(arg) */
        move a0, s1
        jalr s0
1:
        move a0, sp /* pt_regs */
        call syscall_exit_to_user_mode
        j ret_from_exception
SYM_CODE_END(ret_from_fork)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *                        void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
        /* Create a frame record to save ra and s0 (fp) */
        addi    sp, sp, -STACKFRAME_SIZE_ON_STACK
        REG_S   ra, STACKFRAME_RA(sp)
        REG_S   s0, STACKFRAME_FP(sp)
        addi    s0, sp, STACKFRAME_SIZE_ON_STACK
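        /*
         * s0 now holds the original sp (the task stack), which is how we get
         * back once the handler has run on the IRQ stack.
         */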

        /* Switch to the per-CPU shadow call stack */
        scs_save_current
        scs_load_irq_stack t0

        /* Switch to the per-CPU IRQ stack and call the handler */
        load_per_cpu t0, irq_stack_ptr, t1
        li      t1, IRQ_STACK_SIZE
        add     sp, t0, t1
        jalr    a1

        /* Switch back to the thread shadow call stack */
        scs_load_current

        /* Switch back to the thread stack and restore ra and s0 */
        addi    sp, s0, -STACKFRAME_SIZE_ON_STACK
        REG_L   ra, STACKFRAME_RA(sp)
        REG_L   s0, STACKFRAME_FP(sp)
        addi    sp, sp, STACKFRAME_SIZE_ON_STACK

        ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
        /* Save context into prev->thread */
        li    a4,  TASK_THREAD_RA
        add   a3, a0, a4
        add   a4, a1, a4
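        /*
         * a3 = &prev->thread.ra, a4 = &next->thread.ra: the TASK_THREAD_*_RA
         * offsets used below are defined relative to thread.ra, so a single
         * add per task yields the base of the saved-register area.
         */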
        REG_S ra,  TASK_THREAD_RA_RA(a3)
        REG_S sp,  TASK_THREAD_SP_RA(a3)
        REG_S s0,  TASK_THREAD_S0_RA(a3)
        REG_S s1,  TASK_THREAD_S1_RA(a3)
        REG_S s2,  TASK_THREAD_S2_RA(a3)
        REG_S s3,  TASK_THREAD_S3_RA(a3)
        REG_S s4,  TASK_THREAD_S4_RA(a3)
        REG_S s5,  TASK_THREAD_S5_RA(a3)
        REG_S s6,  TASK_THREAD_S6_RA(a3)
        REG_S s7,  TASK_THREAD_S7_RA(a3)
        REG_S s8,  TASK_THREAD_S8_RA(a3)
        REG_S s9,  TASK_THREAD_S9_RA(a3)
        REG_S s10, TASK_THREAD_S10_RA(a3)
        REG_S s11, TASK_THREAD_S11_RA(a3)
        /* Save the kernel shadow call stack pointer */
        scs_save_current
        /* Restore context from next->thread */
        REG_L ra,  TASK_THREAD_RA_RA(a4)
        REG_L sp,  TASK_THREAD_SP_RA(a4)
        REG_L s0,  TASK_THREAD_S0_RA(a4)
        REG_L s1,  TASK_THREAD_S1_RA(a4)
        REG_L s2,  TASK_THREAD_S2_RA(a4)
        REG_L s3,  TASK_THREAD_S3_RA(a4)
        REG_L s4,  TASK_THREAD_S4_RA(a4)
        REG_L s5,  TASK_THREAD_S5_RA(a4)
        REG_L s6,  TASK_THREAD_S6_RA(a4)
        REG_L s7,  TASK_THREAD_S7_RA(a4)
        REG_L s8,  TASK_THREAD_S8_RA(a4)
        REG_L s9,  TASK_THREAD_S9_RA(a4)
        REG_L s10, TASK_THREAD_S10_RA(a4)
        REG_L s11, TASK_THREAD_S11_RA(a4)
        /* The offset of thread_info in task_struct is zero. */
        move tp, a1
        /* Switch to the next shadow call stack */
        scs_load_current
        ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

        .section ".rodata"
        .align LGREG
        /* Exception vector table */
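        /*
         * handle_exception indexes this table directly by the scause
         * exception code, so the entries must stay in numerical order.
         */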
SYM_DATA_START_LOCAL(excp_vect_table)
        RISCV_PTR do_trap_insn_misaligned
        ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
        RISCV_PTR do_trap_insn_illegal
        RISCV_PTR do_trap_break
        RISCV_PTR do_trap_load_misaligned
        RISCV_PTR do_trap_load_fault
        RISCV_PTR do_trap_store_misaligned
        RISCV_PTR do_trap_store_fault
        RISCV_PTR do_trap_ecall_u /* system call */
        RISCV_PTR do_trap_ecall_s
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_trap_ecall_m
        /* instruction page fault */
        ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
        RISCV_PTR do_page_fault   /* load page fault */
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_page_fault   /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
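/*
 * With no MMU there is no VDSO sigreturn helper, so this li + ecall pair is
 * provided as the userspace signal-return trampoline instead.
 */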
SYM_DATA_START(__user_rt_sigreturn)
        li a7, __NR_rt_sigreturn
        ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif
