/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 1994-2002 Russell King
 * Copyright (c) 2003, 2020 ARM Limited
 * All Rights Reserved
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/page.h>

/* Word offsets of the low/high halves of the 64-bit __pv_offset variable. */
#ifdef __ARMEB__
#define LOW_OFFSET	0x4
#define HIGH_OFFSET	0x0
#else
#define LOW_OFFSET	0x0
#define HIGH_OFFSET	0x4
#endif

/*
 * __fixup_pv_table - patch the stub instructions with the delta between
 *                    PHYS_OFFSET and PAGE_OFFSET, which is assumed to be
 *                    2 MiB aligned.
 *
 * Called from head.S, which expects the following registers to be preserved:
 *   r1 = machine no, r2 = atags or dtb,
 *   r8 = phys_offset, r9 = cpuid, r10 = procinfo
 */
	__HEAD
ENTRY(__fixup_pv_table)
	mov	r0, r8, lsr #PAGE_SHIFT	@ convert phys_offset to PFN
	str_l	r0, __pv_phys_pfn_offset, r3

	adr_l	r0, __pv_offset
	subs	r3, r8, #PAGE_OFFSET	@ PHYS_OFFSET - PAGE_OFFSET
	mvn	ip, #0
	strcc	ip, [r0, #HIGH_OFFSET]	@ save high word (all ones if delta < 0)
	str	r3, [r0, #LOW_OFFSET]	@ save low word of __pv_offset

	mov	r0, r3, lsr #21		@ constant for add/sub instructions
	teq	r3, r0, lsl #21 	@ must be 2 MiB aligned
	bne	0f

	adr_l	r4, __pv_table_begin
	adr_l	r5, __pv_table_end
	b	__fixup_a_pv_table

0:	mov	r0, r0			@ deadloop: offset not 2 MiB aligned
	b	0b
ENDPROC(__fixup_pv_table)

	.text
/*
 * __fixup_a_pv_table - walk the patch table [r4, r5) and rewrite each
 * recorded stub instruction with the value of __pv_offset.
 * NOTE(review): clobbers r0, r3, r6, r7, ip — callers here save what they
 * need; confirm against head.S register expectations before reuse.
 */
__fixup_a_pv_table:
	adr_l	r6, __pv_offset
	ldr	r0, [r6, #HIGH_OFFSET]	@ pv_offset high word
	ldr	r6, [r6, #LOW_OFFSET]	@ pv_offset low word
	cmn	r0, #1			@ Z set if high word == 0xffffffff (negative offset)
#ifdef CONFIG_THUMB2_KERNEL
	@
	@ The Thumb-2 versions of the patchable sequences are
	@
	@ phys-to-virt:			movw	<reg>, #offset<31:21>
	@				lsl	<reg>, #21
	@				sub	<VA>, <PA>, <reg>
	@
	@ virt-to-phys (non-LPAE):	movw	<reg>, #offset<31:21>
	@				lsl	<reg>, #21
	@				add	<PA>, <VA>, <reg>
	@
	@ virt-to-phys (LPAE):		movw	<reg>, #offset<31:21>
	@				lsl	<reg>, #21
	@				adds	<PAlo>, <VA>, <reg>
	@				mov	<PAhi>, #offset<39:32>
	@				adc	<PAhi>, <PAhi>, #0
	@
	@ In the non-LPAE case, all patchable instructions are MOVW
	@ instructions, where we need to patch the offset bits into the
	@ second halfword of the opcode (the 16-bit immediate is encoded
	@ as imm4:i:imm3:imm8)
	@
	@       15       11 10  9           4 3    0
	@      +-----------+---+-------------+------+
	@ MOVW | 1 1 1 1 0 | i | 1 0 0 1 0 0 | imm4 |  (+ imm3:Rd:imm8 in halfword 2)
	@      +-----------+---+-------------+------+
	@
	@ In the LPAE case, we also need to patch the high word of the
	@ offset into the immediate field of the MOV instruction, or patch it
	@ to a MVN instruction if the offset is negative. In this case, we
	@ need to inspect the first halfword of the opcode, to check whether
	@ it is MOVW or MOV/MVN, and to perform the patching from MOV to MVN
	@ as needed. The encoding of the immediate is rather complex for values
	@ of i:imm3 != 0b0000, but fortunately, we only need the 8 lower
	@ order bits, which can be patched into imm8 directly (with i:imm3
	@ cleared)
	@
	@       15       11 10  9                 5
	@      +-----------+---+-----------------------+
	@ MOV  | 1 1 1 1 0 | i | 0 0 0 1 0 0 1 1 1 1 … |
	@ MVN  | 1 1 1 1 0 | i | 0 0 0 1 1 0 1 1 1 1 … |
	@      +-----------+---+-----------------------+
	@
	moveq	r0, #0x200000		@ set bit 21: turns MOV into MVN (offset < 0)
	lsrs	r3, r6, #29		@ isolate top 3 bits of the low offset word
	ubfx	r6, r6, #21, #8		@ put offset bits 28:21 into the imm8 field
	bfi	r6, r3, #12, #3		@ put offset bits 31:29 into the imm3 field
	b	.Lnext
.Lloop:	add	r7, r4
	adds	r4, #4			@ also clears the Z flag for the default path
#ifdef CONFIG_ARM_LPAE
	ldrh	ip, [r7]
ARM_BE8(rev16	ip, ip)
	tst	ip, #0x200		@ MOVW has bit 9 set; MOV/MVN has it clear
	bne	0f			@ skip MOV/MVN patching (Z flag is clear)
	bic	ip, #0x20		@ clear bit 5 (MVN -> MOV)
	orr	ip, ip, r0, lsr #16	@ MOV -> MVN if offset is negative
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7]
	@ Z flag is set
0:
#endif
	ldrh	ip, [r7, #2]
ARM_BE8(rev16	ip, ip)
	and	ip, #0xf00		@ clear the immediate fields, keep Rd
	orreq	ip, r0			@ Z flag set   -> MOV/MVN: patch high offset byte
	orrne	ip, r6			@ Z flag clear -> MOVW: patch offset<31:21>
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7, #2]
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
@ in BE8, we load data in BE, but instructions are still little-endian,
@ so the opcode masks below are byte-swapped
#define PV_BIT24	0x00000001
#define PV_IMM8_MASK	0xff000000
#define PV_IMMR_MSB	0x00080000
#else
#define PV_BIT24	0x01000000
#define PV_IMM8_MASK	0x000000ff
#define PV_IMMR_MSB	0x00000800
#endif

	@
	@ The ARM versions of the patchable sequences are
	@
	@ phys-to-virt:			sub	<VA>, <PA>, #offset<31:24>, lsl #24
	@				sub	<VA>, <PA>, #offset<23:16>, lsl #16
	@
	@ virt-to-phys (non-LPAE):	add	<PA>, <VA>, #offset<31:24>, lsl #24
	@				add	<PA>, <VA>, #offset<23:16>, lsl #16
	@
	@ virt-to-phys (LPAE):		movw	<reg>, #offset<31:20>
	@				adds	<PAlo>, <VA>, <reg>, lsl #20
	@				mov	<PAhi>, #offset<39:32>
	@				adc	<PAhi>, <PAhi>, #0
	@
	@ In the non-LPAE case, all patchable instructions are ADD/SUB
	@ instructions, where we need to patch the offset byte into the
	@ immediate field of the opcode, which is emitted with the correct
	@ rotation value. (The effective value of the immediate is imm12<7:0>
	@ rotated right by [2 * imm12<11:8>] bits)
	@
	@      31   28 27      23 22  20 19   16
	@      +------+-----------------+------+----------------+
	@ ADD  | cond | 0 0 1 0 1 0 0 0 |  Rn  | Rd:imm12       |
	@ SUB  | cond | 0 0 1 0 0 1 0 0 |  Rn  | Rd:imm12       |
	@ MOV  | cond | 0 0 1 1 1 0 1 0 |  Rn  | Rd:imm12       |
	@ MVN  | cond | 0 0 1 1 1 1 1 0 |  Rn  | Rd:imm12       |
	@      +------+-----------------+------+----------------+
	@
	@ In the LPAE case, we use a MOVW instruction to carry the low offset
	@ word, and patch the high word of the offset into the immediate
	@ field of the subsequent MOV instruction, or patch it to a MVN
	@ instruction if the offset is negative. MOVW and MOV/MVN
	@ instructions are distinguished based on bits 23:22 of the opcode,
	@ and MOVW/MOV/MVN can be distinguished from ADD/SUB (all using the
	@ encodings above) via bit 24.
	@
	@      31   28 27      23 22  20 19   16
	@      +------+-----------------+------+----------------+
	@ MOVW | cond | 0 0 1 1 0 0 0 0 | imm4 | Rd:imm12       |
	@      +------+-----------------+------+----------------+
	@
	moveq	r0, #0x400000		@ set bit 22: turns MOV into MVN (offset < 0)
	mov	r3, r6, lsr #16		@ put offset bits 31:16 into r3
	mov	r6, r6, lsr #24		@ put offset bits 31:24 into r6
	and	r3, r3, #0xf0		@ only keep offset bits 23:20
	b	.Lnext
.Lloop:	ldr	ip, [r7, r4]
#ifdef CONFIG_ARM_LPAE
	tst	ip, #PV_BIT24		@ ADD/SUB have bit 24 clear
	beq	1f
ARM_BE8(rev	ip, ip)
	tst	ip, #0xc00000		@ MOVW has bits 23:22 clear
	bic	ip, ip, #0x400000	@ clear bit 22 (drop any stale MVN patch)
	bfc	ip, #0, #12		@ clear the imm12 field
	orreq	ip, ip, r6, lsl #4	@ MOVW -> patch in offset bits 31:24
	orreq	ip, ip, r3, lsr #4	@ MOVW -> patch in offset bits 23:20
	orrne	ip, ip, r0		@ MOV  -> MVN if the offset is negative
ARM_BE8(rev	ip, ip)
	b	2f
1:
#endif
	tst	ip, #PV_IMMR_MSB		@ rotation selects which offset byte
	bic	ip, ip, #PV_IMM8_MASK		@ clear the imm8 field
	orreq	ip, ip, r6 ARM_BE8(, lsl #24)	@ patch in offset bits 31:24
	orrne	ip, ip, r3 ARM_BE8(, lsl #24)	@ patch in offset bits 23:16
2:
	str	ip, [r7, r4]
	add	r4, r4, #4
#endif

.Lnext:
	cmp	r4, r5
	ldrcc	r7, [r4]		@ load next table entry before the branch
	bcc	.Lloop
	ret	lr
ENDPROC(__fixup_a_pv_table)

/*
 * fixup_pv_table - C-callable entry: patch a single pv table.
 * r0 = table start, r1 = table size in bytes.
 */
ENTRY(fixup_pv_table)
	stmfd	sp!, {r4 - r7, lr}
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size -> r5 = table end
	bl	__fixup_a_pv_table
	ldmfd	sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)

	.data
	.align	2
	.globl	__pv_phys_pfn_offset
	.type	__pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:
	.word	0
	.size	__pv_phys_pfn_offset, . -__pv_phys_pfn_offset

	.globl	__pv_offset
	.type	__pv_offset, %object
__pv_offset:
	.quad	0
	.size	__pv_offset, . -__pv_offset
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.