/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H

#include <asm/compiler.h>
#include <asm/cputype.h>
#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/types.h>

#ifdef CONFIG_KASAN_HW_TAGS

/* Whether the MTE asynchronous or asymmetric mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);

static inline bool system_uses_mte_async_or_asymm_mode(void)
{
	return static_branch_unlikely(&mte_async_or_asymm_mode);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool system_uses_mte_async_or_asymm_mode(void)
{
	return false;
}

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_ARM64_MTE

/*
 * The Tag Check Flag (TCF) mode for MTE is per EL: TCF0 affects EL0 and
 * TCF affects EL1, irrespective of which TTBR is used.
 * The kernel usually accesses TTBR0 with LDTR/STTR instructions when UAO
 * is available, so these act as EL0 accesses governed by TCF0.
 * However, the futex.h code uses exclusives, which execute as EL1
 * accesses; this can potentially cause a tag check fault even if the
 * user has disabled TCF0.
 *
 * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
 * and reset it in uaccess_disable().
 *
 * The Tag Check Override (TCO) bit temporarily disables tag checking,
 * preventing the issue.
 */
static inline void mte_disable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

static inline void mte_enable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

/*
 * These functions disable tag checking only in the MTE asynchronous (or
 * asymmetric) mode, since the synchronous mode generates exceptions
 * synchronously and the nofault or load_unaligned_zeropad() paths can
 * handle them.
 */
static inline void __mte_disable_tco_async(void)
{
	if (system_uses_mte_async_or_asymm_mode())
		mte_disable_tco();
}

static inline void __mte_enable_tco_async(void)
{
	if (system_uses_mte_async_or_asymm_mode())
		mte_enable_tco();
}

/*
 * These functions are meant to be used only from the KASAN runtime through
 * the arch_*() interface defined in asm/memory.h.
 * These functions don't include system_supports_mte() checks,
 * as KASAN only calls them when MTE is supported and enabled.
 */

static inline u8 mte_get_ptr_tag(void *ptr)
{
	/* Note: The format of KASAN tags is 0xF<x>. */
	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);

	return tag;
}

/* Get the allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
		: "+r" (addr));

	return mte_get_ptr_tag(addr);
}

/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
	void *addr;

	asm(__MTE_PREAMBLE "irg %0, %0"
		: "=r" (addr));

	return mte_get_ptr_tag(addr);
}
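/*
 * Illustrative sketch, not part of the upstream header: in the HW_TAGS
 * KASAN mode an access is bad when the logical tag in the pointer does
 * not match the allocation tag of the granule it addresses. A minimal
 * form of that comparison, built on the helpers above (the function
 * name is hypothetical):
 */
static inline bool mte_example_tags_match(void *ptr)
{
	/*
	 * Both helpers return tags in the 0xF<x> KASAN format, so the
	 * values compare directly.
	 */
	return mte_get_ptr_tag(ptr) == mte_get_mem_tag(ptr);
}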
static inline u64 __stg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline u64 __stzg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline void __dc_gva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}

static inline void __dc_gzva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}

/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be MTE_GRANULE_SIZE aligned.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
	u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;

	/* Read the DC G(Z)VA block size and DZP from the system register. */
	dczid = read_cpuid(DCZID_EL0);
	dczid_bs = 4ul << (dczid & 0xf);
	/* DZP set means DC GVA/GZVA must not be used. */
	dczid_dzp = (dczid >> 4) & 1;

	curr = (u64)__tag_set(addr, tag);
	mask = dczid_bs - 1;
	/* STG/STZG up to the end of the first block. */
	end1 = curr | mask;
	end3 = curr + size;
	/* DC GVA / GZVA in [end1, end2) */
	end2 = end3 & ~mask;

	/*
	 * The following code uses STG on the first DC GVA block even if the
	 * start address is aligned - it appears to be faster than an alignment
	 * check + conditional branch. Also, if the range size is at least 2 DC
	 * GVA blocks, the first two loops can use post-condition to save one
	 * branch each.
	 */
#define SET_MEMTAG_RANGE(stg_post, dc_gva)		\
	do {						\
		if (!dczid_dzp && size >= 2 * dczid_bs) {\
			do {				\
				curr = stg_post(curr);	\
			} while (curr < end1);		\
							\
			do {				\
				dc_gva(curr);		\
				curr += dczid_bs;	\
			} while (curr < end2);		\
		}					\
							\
		while (curr < end3)			\
			curr = stg_post(curr);		\
	} while (0)

	if (init)
		SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
	else
		SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}
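/*
 * Worked example (illustrative, not from the upstream source): assume a
 * 64-byte DC GVA block size (DCZID_EL0.BS = 4, so dczid_bs = 4 << 4) with
 * DZP clear, curr = 0x1050 and size = 256. Then:
 *
 *   mask = 0x3f, end1 = 0x107f, end2 = 0x1140, end3 = 0x1150
 *
 * The first loop STGs the granules at 0x1050, 0x1060 and 0x1070, stopping
 * once curr reaches 0x1080 (>= end1); the second loop tags the 64-byte
 * blocks at 0x1080, 0x10c0 and 0x1100 with DC GVA; the tail loop STGs the
 * final granule at 0x1140, covering all 256 bytes of [0x1050, 0x1150).
 */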
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);

#else /* CONFIG_ARM64_MTE */

static inline void mte_disable_tco(void)
{
}

static inline void mte_enable_tco(void)
{
}

static inline void __mte_disable_tco_async(void)
{
}

static inline void __mte_enable_tco_async(void)
{
}

static inline u8 mte_get_ptr_tag(void *ptr)
{
	return 0xFF;
}

static inline u8 mte_get_mem_tag(void *addr)
{
	return 0xFF;
}

static inline u8 mte_get_random_tag(void)
{
	return 0xFF;
}

static inline void mte_set_mem_tag_range(void *addr, size_t size,
					 u8 tag, bool init)
{
}

static inline void mte_enable_kernel_sync(void)
{
}

static inline void mte_enable_kernel_async(void)
{
}

static inline void mte_enable_kernel_asymm(void)
{
}

#endif /* CONFIG_ARM64_MTE */
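/*
 * Illustrative sketch, not part of the upstream header: a HW_TAGS-style
 * runtime would typically colour a fresh allocation by generating a
 * random tag, tagging the backing memory, and handing out a retagged
 * pointer. The function name is hypothetical; the real callers live in
 * the KASAN core, reached via the arch_*() interface in asm/memory.h.
 * __tag_set() is the same helper mte_set_mem_tag_range() uses above.
 */
static inline void *mte_example_tag_region(void *addr, size_t size, bool init)
{
	u8 tag = mte_get_random_tag();

	/* addr and size must be MTE_GRANULE_SIZE aligned (see above). */
	mte_set_mem_tag_range(addr, size, tag, init);

	return (void *)__tag_set(addr, tag);
}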
#endif /* __ASSEMBLY__ */

#endif /* __ASM_MTE_KASAN_H */