TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/entry-common.c


// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
        regs->exit_rcu = false;

        if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
                lockdep_hardirqs_off(CALLER_ADDR0);
                ct_irq_enter();
                trace_hardirqs_off_finish();

                regs->exit_rcu = true;
                return;
        }

        lockdep_hardirqs_off(CALLER_ADDR0);
        rcu_irq_enter_check_tick();
        trace_hardirqs_off_finish();
}

static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
        __enter_from_kernel_mode(regs);
        mte_check_tfsr_entry();
        mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
        lockdep_assert_irqs_disabled();

        if (interrupts_enabled(regs)) {
                if (regs->exit_rcu) {
                        trace_hardirqs_on_prepare();
                        lockdep_hardirqs_on_prepare();
                        ct_irq_exit();
                        lockdep_hardirqs_on(CALLER_ADDR0);
                        return;
                }

                trace_hardirqs_on();
        } else {
                if (regs->exit_rcu)
                        ct_irq_exit();
        }
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
        mte_check_tfsr_exit();
        __exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
        lockdep_hardirqs_off(CALLER_ADDR0);
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
        trace_hardirqs_off_finish();
        mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
        __enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare();
        user_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);
}

static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
        do {
                local_irq_enable();

                if (thread_flags & _TIF_NEED_RESCHED)
                        schedule();

                if (thread_flags & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
                        clear_thread_flag(TIF_MTE_ASYNC_FAULT);
                        send_sig_fault(SIGSEGV, SEGV_MTEAERR,
                                       (void __user *)NULL, current);
                }

                if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                        do_signal(regs);

                if (thread_flags & _TIF_NOTIFY_RESUME)
                        resume_user_mode_work(regs);

                if (thread_flags & _TIF_FOREIGN_FPSTATE)
                        fpsimd_restore_current_state();

                local_irq_disable();
                thread_flags = read_thread_flags();
        } while (thread_flags & _TIF_WORK_MASK);
}
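
/*
 * Editor's note (illustration, not part of this file): _TIF_WORK_MASK
 * gathers exactly the flags polled in the loop above. A sketch of the
 * definition from arch/arm64/include/asm/thread_info.h, assuming a
 * contemporary kernel (the precise set varies between versions):
 *
 *      #define _TIF_WORK_MASK  (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 *                               _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
 *                               _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
 *                               _TIF_NOTIFY_SIGNAL)
 */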

static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
        unsigned long flags;

        local_irq_disable();

        flags = read_thread_flags();
        if (unlikely(flags & _TIF_WORK_MASK))
                do_notify_resume(regs, flags);

        local_daif_mask();

        lockdep_sys_exit();
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
        exit_to_user_mode_prepare(regs);
        mte_check_tfsr_exit();
        __exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
        exit_to_user_mode(regs);
}
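
/*
 * Editor's note: asm_exit_to_user_mode() is assumed to be the C hook
 * invoked from the EL0 kernel_exit path in arch/arm64/kernel/entry.S,
 * just before registers are restored and the eret is issued; nothing
 * else in this file calls it.
 */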

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

        __nmi_enter();
        lockdep_hardirqs_off(CALLER_ADDR0);
        lockdep_hardirq_enter();
        ct_nmi_enter();

        trace_hardirqs_off_finish();
        ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
        bool restore = regs->lockdep_hardirqs;

        ftrace_nmi_exit();
        if (restore) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare();
        }

        ct_nmi_exit();
        lockdep_hardirq_exit();
        if (restore)
                lockdep_hardirqs_on(CALLER_ADDR0);
        __nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

        lockdep_hardirqs_off(CALLER_ADDR0);
        ct_nmi_enter();

        trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
        bool restore = regs->lockdep_hardirqs;

        if (restore) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare();
        }

        ct_nmi_exit();
        if (restore)
                lockdep_hardirqs_on(CALLER_ADDR0);
}

#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
        (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()   (IS_ENABLED(CONFIG_PREEMPTION))
#endif

static void __sched arm64_preempt_schedule_irq(void)
{
        if (!need_irq_preemption())
                return;

        /*
         * Note: thread_info::preempt_count includes both thread_info::count
         * and thread_info::need_resched, and is not equivalent to
         * preempt_count().
         */
        if (READ_ONCE(current_thread_info()->preempt_count) != 0)
                return;

        /*
         * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
         * priority masking is used the GIC irqchip driver will clear DAIF.IF
         * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
         * DAIF we must have handled an NMI, so skip preemption.
         */
        if (system_uses_irq_prio_masking() && read_sysreg(daif))
                return;

        /*
         * Preempting a task from an IRQ means we leave copies of PSTATE
         * on the stack. cpufeature's enable calls may modify PSTATE, but
         * resuming one of these preempted tasks would undo those changes.
         *
         * Only allow a task to be preempted once cpufeatures have been
         * enabled.
         */
        if (system_capabilities_finalized())
                preempt_schedule_irq();
}
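
/*
 * Editor's note (illustration, not part of this file): the preempt_count
 * comment above refers to arm64's thread_info, which overlays a 64-bit
 * preempt_count on a {count, need_resched} pair so one load tests both.
 * A sketch of the little-endian layout from
 * arch/arm64/include/asm/thread_info.h (big-endian swaps the two fields):
 *
 *      union {
 *              u64     preempt_count;  // 0 => preemptible, <0 => bug
 *              struct {
 *                      u32     count;
 *                      u32     need_resched;
 *              } preempt;
 *      };
 *
 * Hence the READ_ONCE() above only allows scheduling when both the nest
 * count is zero and need_resched is clear.
 */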

static void do_interrupt_handler(struct pt_regs *regs,
                                 void (*handler)(struct pt_regs *))
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        if (on_thread_stack())
                call_on_irq_stack(regs, handler);
        else
                handler(regs);

        set_irq_regs(old_regs);
}
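
/*
 * Editor's note: on_thread_stack() checks whether SP still points into the
 * task's stack; if so, call_on_irq_stack() (an assembly helper in entry.S)
 * switches to this CPU's IRQ stack before invoking the handler, bounding
 * how much interrupt handling can eat into the task stack. If we were
 * already off the thread stack, the handler simply runs on the current
 * stack.
 */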

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
                                      unsigned long esr)
{
        arm64_enter_nmi(regs);

        console_verbose();

        pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
                vector, smp_processor_id(), esr,
                esr_get_class_string(esr));

        __show_regs(regs);
        panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)                                                  \
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)       \
{                                                                                       \
        const char *desc = #regsize "-bit " #el " " #vector;                            \
        __panic_unhandled(regs, desc, read_sysreg(esr_el1));                            \
}
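
/*
 * Editor's note: for illustration, UNHANDLED(el1t, 64, sync) (used further
 * down in this file) expands to:
 *
 *      asmlinkage void noinstr el1t_64_sync_handler(struct pt_regs *regs)
 *      {
 *              const char *desc = "64-bit el1t sync";
 *              __panic_unhandled(regs, desc, read_sysreg(esr_el1));
 *      }
 */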

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
        u32 reg, val;

        if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
                return;

        if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
                return;

        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
        reg = read_sysreg(mdscr_el1);
        val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
        write_sysreg(val, mdscr_el1);
        asm volatile("msr daifclr, #8");
        isb();

        /* We will have taken a single-step exception by this point */

        write_sysreg(reg, mdscr_el1);
        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}
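
/*
 * Editor's note: "msr daifclr, #8" clears PSTATE.D (the 4-bit immediate
 * encodes D=8, A=4, I=2, F=1), unmasking debug exceptions so that the
 * deliberately-armed single-step exception is taken at the isb() above.
 * The debug handler below then re-masks debug via PSR_D_BIT, and control
 * returns here to restore MDSCR_EL1 and clear the per-CPU flag.
 */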

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
                return false;

        /*
         * We've taken a dummy step exception from the kernel to ensure
         * that interrupts are re-enabled on the syscall path. Return back
         * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
         * masked so that we can safely restore the mdscr and get on with
         * handling the syscall.
         */
        regs->pstate |= PSR_D_BIT;
        return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

/*
 * As per the ABI, exit SME streaming mode and clear the SVE state not
 * shared with FPSIMD on syscall entry.
 */
static inline void fp_user_discard(void)
{
        /*
         * If SME is active then exit streaming mode.  If ZA is active
         * then flush the SVE registers but leave userspace access to
         * both SVE and SME enabled, otherwise disable SME for the
         * task and fall through to disabling SVE too.  This means
         * that after a syscall we never have any streaming mode
         * register state to track; if this changes, the KVM code will
         * need updating.
         */
        if (system_supports_sme())
                sme_smstop_sm();

        if (!system_supports_sve())
                return;

        if (test_thread_flag(TIF_SVE)) {
                unsigned int sve_vq_minus_one;

                sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
                sve_flush_live(true, sve_vq_minus_one);
        }
}
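
/*
 * Editor's note (illustration, not part of this file): the vector-quadword
 * count is the vector length in bytes divided by the 128-bit quadword
 * size. A sketch of the helpers assumed above, per
 * include/uapi/asm/sve_context.h and arch/arm64/include/asm/fpsimd.h:
 *
 *      #define __SVE_VQ_BYTES          16      // bytes per quadword
 *      #define sve_vq_from_vl(vl)      ((vl) / __SVE_VQ_BYTES)
 *
 * so sve_vq_minus_one is the 0-based VQ encoding the flush sequence
 * expects.
 */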

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_mem_abort(far, esr, regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_sp_pc_abort(far, esr, regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_el1_undef(regs, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_el1_bti(regs, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        arm64_enter_el1_dbg(regs);
        if (!cortex_a76_erratum_1463225_debug_handler(regs))
                do_debug_exception(far, esr, regs);
        arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_el1_fpac(regs, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_DABT_CUR:
        case ESR_ELx_EC_IABT_CUR:
                el1_abort(regs, esr);
                break;
        /*
         * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
         * recursive exception when trying to push the initial pt_regs.
         */
        case ESR_ELx_EC_PC_ALIGN:
                el1_pc(regs, esr);
                break;
        case ESR_ELx_EC_SYS64:
        case ESR_ELx_EC_UNKNOWN:
                el1_undef(regs, esr);
                break;
        case ESR_ELx_EC_BTI:
                el1_bti(regs, esr);
                break;
        case ESR_ELx_EC_BREAKPT_CUR:
        case ESR_ELx_EC_SOFTSTP_CUR:
        case ESR_ELx_EC_WATCHPT_CUR:
        case ESR_ELx_EC_BRK64:
                el1_dbg(regs, esr);
                break;
        case ESR_ELx_EC_FPAC:
                el1_fpac(regs, esr);
                break;
        default:
                __panic_unhandled(regs, "64-bit el1h sync", esr);
        }
}
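
/*
 * Editor's note (illustration, not part of this file): ESR_ELx_EC()
 * extracts the 6-bit exception class from ESR_EL1[31:26]. A sketch of the
 * definition from arch/arm64/include/asm/esr.h:
 *
 *      #define ESR_ELx_EC_SHIFT        26
 *      #define ESR_ELx_EC_MASK         (UL(0x3F) << ESR_ELx_EC_SHIFT)
 *      #define ESR_ELx_EC(esr)         (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
 */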

static __always_inline void __el1_pnmi(struct pt_regs *regs,
                                       void (*handler)(struct pt_regs *))
{
        arm64_enter_nmi(regs);
        do_interrupt_handler(regs, handler);
        arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
                                      void (*handler)(struct pt_regs *))
{
        enter_from_kernel_mode(regs);

        irq_enter_rcu();
        do_interrupt_handler(regs, handler);
        irq_exit_rcu();

        arm64_preempt_schedule_irq();

        exit_to_kernel_mode(regs);
}

static void noinstr el1_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
{
        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                __el1_pnmi(regs, handler);
        else
                __el1_irq(regs, handler);
}
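
/*
 * Editor's note: with CONFIG_ARM64_PSEUDO_NMI, an interrupt can be taken
 * from a context that had IRQs masked in PSTATE, because delivery is then
 * gated by GIC priority masking rather than PSTATE.I alone.
 * interrupts_enabled(regs) being false in that case identifies the event
 * as a pseudo-NMI, which must take the NMI entry/exit accounting path
 * rather than the normal IRQ path.
 */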

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        local_daif_restore(DAIF_ERRCTX);
        arm64_enter_nmi(regs);
        do_serror(regs, esr);
        arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        /*
         * We've taken an instruction abort from userspace and not yet
         * re-enabled IRQs. If the address is a kernel address, apply
         * BP hardening prior to enabling IRQs and preemption.
         */
        if (!is_ttbr0_addr(far))
                arm64_apply_bp_hardening();

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_acc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sve_acc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sme_acc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_exc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_sys(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        if (!is_ttbr0_addr(instruction_pointer(regs)))
                arm64_apply_bp_hardening();

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(regs->sp, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_undef(regs, esr);
        exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_bti(regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_mops(regs, esr);
        exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        bad_el0_sync(regs, 0, esr);
        exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
        /* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
        unsigned long far = read_sysreg(far_el1);

        enter_from_user_mode(regs);
        do_debug_exception(far, esr, regs);
        local_daif_restore(DAIF_PROCCTX);
        exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        cortex_a76_erratum_1463225_svc_handler();
        fp_user_discard();
        local_daif_restore(DAIF_PROCCTX);
        do_el0_svc(regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_fpac(regs, esr);
        exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_SVC64:
                el0_svc(regs);
                break;
        case ESR_ELx_EC_DABT_LOW:
                el0_da(regs, esr);
                break;
        case ESR_ELx_EC_IABT_LOW:
                el0_ia(regs, esr);
                break;
        case ESR_ELx_EC_FP_ASIMD:
                el0_fpsimd_acc(regs, esr);
                break;
        case ESR_ELx_EC_SVE:
                el0_sve_acc(regs, esr);
                break;
        case ESR_ELx_EC_SME:
                el0_sme_acc(regs, esr);
                break;
        case ESR_ELx_EC_FP_EXC64:
                el0_fpsimd_exc(regs, esr);
                break;
        case ESR_ELx_EC_SYS64:
        case ESR_ELx_EC_WFx:
                el0_sys(regs, esr);
                break;
        case ESR_ELx_EC_SP_ALIGN:
                el0_sp(regs, esr);
                break;
        case ESR_ELx_EC_PC_ALIGN:
                el0_pc(regs, esr);
                break;
        case ESR_ELx_EC_UNKNOWN:
                el0_undef(regs, esr);
                break;
        case ESR_ELx_EC_BTI:
                el0_bti(regs);
                break;
        case ESR_ELx_EC_MOPS:
                el0_mops(regs, esr);
                break;
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_WATCHPT_LOW:
        case ESR_ELx_EC_BRK64:
                el0_dbg(regs, esr);
                break;
        case ESR_ELx_EC_FPAC:
                el0_fpac(regs, esr);
                break;
        default:
                el0_inv(regs, esr);
        }
}

static void noinstr el0_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
{
        enter_from_user_mode(regs);

        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

        if (regs->pc & BIT(55))
                arm64_apply_bp_hardening();

        irq_enter_rcu();
        do_interrupt_handler(regs, handler);
        irq_exit_rcu();

        exit_to_user_mode(regs);
}
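
/*
 * Editor's note: bit 55 of an address selects between the TTBR0 (user)
 * and TTBR1 (kernel) halves of the address space independently of any tag
 * bits in [63:56], so "regs->pc & BIT(55)" detects a user task whose PC
 * lies in the kernel half and applies branch-predictor hardening before
 * handling the interrupt.
 */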

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
        el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
        __el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
        el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
        __el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_ERRCTX);
        arm64_enter_nmi(regs);
        do_serror(regs, esr);
        arm64_exit_nmi(regs);
        local_daif_restore(DAIF_PROCCTX);
        exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
        __el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_el0_cp15(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        cortex_a76_erratum_1463225_svc_handler();
        local_daif_restore(DAIF_PROCCTX);
        do_el0_svc_compat(regs);
        exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_SVC32:
                el0_svc_compat(regs);
                break;
        case ESR_ELx_EC_DABT_LOW:
                el0_da(regs, esr);
                break;
        case ESR_ELx_EC_IABT_LOW:
                el0_ia(regs, esr);
                break;
        case ESR_ELx_EC_FP_ASIMD:
                el0_fpsimd_acc(regs, esr);
                break;
        case ESR_ELx_EC_FP_EXC32:
                el0_fpsimd_exc(regs, esr);
                break;
        case ESR_ELx_EC_PC_ALIGN:
                el0_pc(regs, esr);
                break;
        case ESR_ELx_EC_UNKNOWN:
        case ESR_ELx_EC_CP14_MR:
        case ESR_ELx_EC_CP14_LS:
        case ESR_ELx_EC_CP14_64:
                el0_undef(regs, esr);
                break;
        case ESR_ELx_EC_CP15_32:
        case ESR_ELx_EC_CP15_64:
                el0_cp15(regs, esr);
                break;
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_WATCHPT_LOW:
        case ESR_ELx_EC_BKPT32:
                el0_dbg(regs, esr);
                break;
        default:
                el0_inv(regs, esr);
        }
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
        __el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
        __el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
        __el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        arm64_enter_nmi(regs);
        panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
        unsigned long ret;

        /*
         * We didn't take an exception to get here, so the HW hasn't
         * set/cleared bits in PSTATE that we may rely on.
         *
         * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
         * whether PSTATE bits are inherited unchanged or generated from
         * scratch, and the TF-A implementation always clears PAN and always
         * clears UAO. There are no other known implementations.
         *
         * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
         * PSTATE is modified upon architectural exceptions, and so PAN is
         * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
         * cleared.
         *
         * We must explicitly reset PAN to the expected state, including
         * clearing it when the host isn't using it, in case a VM had it set.
         */
        if (system_uses_hw_pan())
                set_pstate_pan(1);
        else if (cpu_has_pan())
                set_pstate_pan(0);

        arm64_enter_nmi(regs);
        ret = do_sdei_event(regs, arg);
        arm64_exit_nmi(regs);

        return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */
