/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-v7m.S
 *
 *  Copyright (C) 2008 ARM Ltd.
 *
 * Low-level vector interface routines for the ARMv7-M architecture
 */
#include <asm/page.h>
#include <asm/glue.h>
#include <asm/thread_notify.h>
#include <asm/v7m.h>

#include "entry-header.S"

#ifdef CONFIG_TRACE_IRQFLAGS
#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
#endif

/*
 * Catch-all handler for unexpected exceptions: dump IPSR/LR and the
 * saved register state, then spin forever.  Never returns.
 */
__invalid_entry:
	v7m_exception_entry
#ifdef CONFIG_PRINTK
	adr	r0, strerr		@ r0 = format string
	mrs	r1, ipsr		@ r1 = exception number being handled
	mov	r2, lr			@ r2 = EXC_RETURN value
	bl	_printk
#endif
	mov	r0, sp			@ r0 = struct pt_regs * saved on entry
	bl	show_regs
1:	b	1b			@ nothing sane to return to; hang here
ENDPROC(__invalid_entry)

	@ Two %08lx specifiers match the r1 (IPSR) and r2 (LR) arguments
	@ passed to _printk above.
strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"

	.align	2
__irq_entry:
	v7m_exception_entry

	@
	@ Invoke the IRQ handler
	@
	mov	r0, sp			@ pass saved pt_regs to the handler
	ldr_this_cpu sp, irq_stack_ptr, r1, r2

	@
	@ If we took the interrupt while running in the kernel, we may already
	@ be using the IRQ stack, so revert to the original value in that case.
	@
	subs	r2, sp, r0		@ SP above bottom of IRQ stack?
	rsbscs	r2, r2, #THREAD_SIZE	@ ... and below the top?
	movcs	sp, r0			@ If so, revert to incoming SP value

	push	{r0, lr}		@ preserve LR and original SP

	@ routine called with r0 = struct pt_regs *
	bl	generic_handle_arch_irq

	pop	{r0, lr}
	mov	sp, r0			@ restore the original (task) stack

	@
	@ Check for any pending work if returning to user
	@
	ldr	r1, =BASEADDR_V7M_SCB
	ldr	r0, [r1, V7M_SCB_ICSR]
	tst	r0, V7M_SCB_ICSR_RETTOBASE	@ returning to thread mode?
	beq	2f

	get_thread_info tsk
	ldr	r2, [tsk, #TI_FLAGS]
	movs	r2, r2, lsl #16		@ test the low-half work flags
	beq	2f			@ no work pending
	mov	r0, #V7M_SCB_ICSR_PENDSVSET
	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV to run the work

2:
	@ registers r0-r3 and r12 are automatically restored on exception
	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
	@ correctness they don't need to be restored. So only r8-r11 must be
	@ restored here. The easiest way to do so is to restore r0-r7, too.
	ldmia	sp!, {r0-r11}
	add	sp, #PT_REGS_SIZE-S_IP	@ drop the rest of the pt_regs frame
	cpsie	i
	bx	lr
ENDPROC(__irq_entry)

__pendsv_entry:
	v7m_exception_entry

	ldr	r1, =BASEADDR_V7M_SCB
	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
	str	r0, [r1, V7M_SCB_ICSR]	@ clear PendSV

	@ execute the pending work, including reschedule
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
ENDPROC(__pendsv_entry)

/*
 * Register switch for ARMv7-M processors.
 * r0 = previous task_struct, r1 = previous thread_info,
 * r2 = next thread_info.
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	.fnstart
	.cantunwind
	add	ip, r1, #TI_CPU_SAVE
	stmia	ip!, {r4 - r11}		@ Store most regs on stack
	str	sp, [ip], #4
	str	lr, [ip], #4
	mov	r5, r0			@ Preserve prev task_struct across the call
	mov	r6, r2			@ Preserve next thread_info likewise
	add	r4, r2, #TI_CPU_SAVE	@ r4 = next's saved-register area
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	mov	r0, r5
	mov	r1, r6
	ldmia	r4, {r4 - r12, lr}	@ Load all regs saved previously
	set_current r1, r2
	mov	sp, ip			@ ip was loaded with next's saved SP above
	bx	lr
	.fnend
ENDPROC(__switch_to)

	.data
	@ The vector table must be naturally aligned to its size rounded up
	@ to a power of two: 16 system entries + NUM_IRQ externals, 4 bytes
	@ each, fits in 512 bytes when NUM_IRQ <= 112, else 1024 bytes.
#if CONFIG_CPU_V7M_NUM_IRQ <= 112
	.align	9
#else
	.align	10
#endif

/*
 * Vector table (Natural alignment need to be ensured)
 */
ENTRY(vector_table)
	.long	0			@ 0 - Reset stack pointer
	.long	__invalid_entry		@ 1 - Reset
	.long	__invalid_entry		@ 2 - NMI
	.long	__invalid_entry		@ 3 - HardFault
	.long	__invalid_entry		@ 4 - MemManage
	.long	__invalid_entry		@ 5 - BusFault
	.long	__invalid_entry		@ 6 - UsageFault
	.long	__invalid_entry		@ 7 - Reserved
	.long	__invalid_entry		@ 8 - Reserved
	.long	__invalid_entry		@ 9 - Reserved
	.long	__invalid_entry		@ 10 - Reserved
	.long	vector_swi		@ 11 - SVCall
	.long	__invalid_entry		@ 12 - Debug Monitor
	.long	__invalid_entry		@ 13 - Reserved
	.long	__pendsv_entry		@ 14 - PendSV
	.long	__invalid_entry		@ 15 - SysTick
	.rept	CONFIG_CPU_V7M_NUM_IRQ
	.long	__irq_entry		@ External Interrupts
	.endr
	.align	2
	.globl	exc_ret
exc_ret:
	.space	4
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.