// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>

static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
	(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif

#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud) (0)
#else
#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
	(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif

#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
	(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte) (early ? pte_none(pte) : \
	((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))

bool kasan_early_stage = true;

void *kasan_mem_to_shadow(const void *addr)
{
	if (!kasan_arch_is_ready()) {
		return (void *)(kasan_early_shadow_page);
	} else {
		unsigned long maddr = (unsigned long)addr;
		unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
		unsigned long offset = 0;

		if (maddr >= FIXADDR_START)
			return (void *)(kasan_early_shadow_page);

		maddr &= XRANGE_SHADOW_MASK;
		switch (xrange) {
		case XKPRANGE_CC_SEG:
			offset = XKPRANGE_CC_SHADOW_OFFSET;
			break;
		case XKPRANGE_UC_SEG:
			offset = XKPRANGE_UC_SHADOW_OFFSET;
			break;
		case XKVRANGE_VC_SEG:
			offset = XKVRANGE_VC_SHADOW_OFFSET;
			break;
		default:
			WARN_ON(1);
			return NULL;
		}

		return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
	}
}

const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	unsigned long addr = (unsigned long)shadow_addr;

	if (unlikely(addr > KASAN_SHADOW_END) ||
	    unlikely(addr < KASAN_SHADOW_START)) {
		WARN_ON(1);
		return NULL;
	}

	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
	else {
		WARN_ON(1);
		return NULL;
	}
}

/*
 * Alloc memory for shadow memory page table.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

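/*
 * Page-table walkers: return the slot for @addr at the given level,
 * populating a missing upper-level entry on the way down. In the early
 * stage the shared kasan_early_shadow_* tables are linked in; later, a
 * fresh page is allocated per table, pre-filled from the corresponding
 * early shadow table, and then installed.
 */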
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
	if (__pmd_none(early, pmdp_get(pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
		pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
	}

	return pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
	if (__pud_none(early, pudp_get(pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
		pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
	}

	return pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
	if (__p4d_none(early, p4dp_get(p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
		p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
	}

	return pud_offset(p4dp, addr);
}

static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
					__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end);
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);

	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

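/*
 * Early entry point: only sanity-check the shadow region layout here.
 * The actual early shadow mapping (everything pointing at the shared
 * kasan_early_shadow_page) is set up later, in kasan_init().
 */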
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
}

static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
	WRITE_ONCE(*pgdp, pgdval);
}

static void __init clear_pgds(unsigned long start, unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a no-op on 2- and 3-level
	 * page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 i;
	phys_addr_t pa_start, pa_end;

	/*
	 * The PGD entries were populated as invalid_pmd_table or
	 * invalid_pud_table in pagetable_init(), depending on how many
	 * page table levels are used. The pgd entries covering the kasan
	 * shadow region therefore have to be cleared here: their values
	 * are non-zero, so pgd_none() would be false and the populate
	 * calls below would not create any new pgd at all.
	 */
	memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
	csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	/* Maps everything to a single page of zeroes */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
				    kasan_mem_to_shadow((void *)KFENCE_AREA_END));

	kasan_early_stage = false;

	/* Populate the linear mapping */
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)phys_to_virt(pa_start);
		void *end = (void *)phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
	}

	/* Populate modules mapping */
	kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
			   (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized.\n");
}