/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
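/*
 * Illustrative sketch (not part of this header's API): how the software
 * KASAN modes use kasan_mem_to_shadow(). For the Generic mode,
 * KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte describes an 8-byte
 * granule: 0 means the whole granule is addressable, 1..7 mean only the
 * first N bytes are, and negative values mark redzones or freed memory.
 * The helper below is hypothetical and only demonstrates the arithmetic.
 */
#if 0
static bool example_byte_is_poisoned(const void *addr)
{
	s8 shadow = *(s8 *)kasan_mem_to_shadow(addr);
	unsigned long granule_mask = (1UL << KASAN_SHADOW_SCALE_SHIFT) - 1;

	/* Poisoned if this byte's offset in the granule is not covered. */
	return shadow != 0 &&
	       (s8)((unsigned long)addr & granule_mask) >= shadow;
}
#endif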
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
						       void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
						     void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
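/*
 * Illustrative sketch (not part of this header): how a slab allocator is
 * expected to pair the two helpers above when it needs to run an object
 * constructor on a freshly allocated slab. The function and the @ctor
 * argument below are hypothetical stand-ins for the allocator's internals.
 */
#if 0
static void example_setup_object(struct kmem_cache *cache, void *object,
				 void (*ctor)(void *))
{
	if (ctor) {
		/* Make the object accessible just long enough to construct it. */
		kasan_unpoison_new_object(cache, object);
		ctor(object);
		/* Return it to the poisoned state until it is allocated. */
		kasan_poison_new_object(cache, object);
	}
}
#endif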
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			   unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe (for example, a double-free
 * or invalid-free bug was detected); false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs KASAN that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is true
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init,
					    bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}
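/*
 * Illustrative sketch (not part of this header): the order in which a slab
 * allocator's free path is expected to call the two hooks above, loosely
 * modelled on the free hook in mm/slub.c. The function below is a
 * hypothetical, simplified stand-in for the allocator's internals.
 */
#if 0
static bool example_free_hook(struct kmem_cache *cache, void *object, bool init)
{
	/* Reports and rejects double-free and invalid-free attempts. */
	if (kasan_slab_pre_free(cache, object))
		return false;

	/*
	 * Poison the object. If KASAN takes ownership (quarantine), the
	 * allocator must not put the object back on its freelists yet, so
	 * only return true when the object may be reused immediately.
	 */
	return !kasan_slab_free(cache, object, init, false);
}
#endif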
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
							size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							  unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
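/*
 * Illustrative sketch (not part of this header): a subsystem that keeps a
 * small cache of order-0 pages instead of returning them to page_alloc.
 * The pool type and the push/pop helpers are hypothetical; only the
 * placement of the two KASAN calls documented above matters.
 */
#if 0
static void example_pool_put_page(struct example_pool *pool, struct page *page)
{
	/* A buggy (e.g. double-freed) page is reported and not cached. */
	if (!kasan_mempool_poison_pages(page, 0))
		return;
	example_pool_push(pool, page);
}

static struct page *example_pool_get_page(struct example_pool *pool)
{
	struct page *page = example_pool_pop(pool);

	if (page)
		/* Make the cached page accessible again before handing it out. */
		kasan_mempool_unpoison_pages(page, 0);
	return page;
}
#endif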
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							   size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
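/*
 * Illustrative sketch (not part of this header): how a mempool-style cache
 * of kmalloc'ed buffers would use kasan_mempool_poison_object() and
 * kasan_mempool_unpoison_object(), loosely modelled on mm/mempool.c. The
 * pool type and push/pop helpers are hypothetical.
 */
#if 0
static void example_pool_free(struct example_pool *pool, void *element)
{
	/* A double-free or invalid-free is reported; do not cache the element. */
	if (!kasan_mempool_poison_object(element))
		return;
	example_pool_push(pool, element);
}

static void *example_pool_alloc(struct example_pool *pool, size_t size)
{
	void *element = example_pool_pop(pool);

	if (element)
		/* Re-mark the element as valid before giving it to the caller. */
		kasan_mempool_unpoison_object(element, size);
	return element;
}
#endif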
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					     void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					   void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif
#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is needed. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		  bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
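/*
 * Illustrative sketch (not part of this header): with the tag-based modes a
 * pointer carries a tag in its top byte, so two pointers to the same memory
 * may compare unequal. Code that only cares about the underlying address is
 * expected to strip the tag with kasan_reset_tag() first, as in this
 * hypothetical range check.
 */
#if 0
static bool example_addr_in_range(const void *ptr, const void *start, size_t len)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(ptr);
	unsigned long base = (unsigned long)kasan_reset_tag(start);

	return addr >= base && addr < base + len;
}
#endif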
#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						     unsigned long size,
						     kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						  unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}
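/*
 * Illustrative sketch (not part of this header): how a vmap-style caller is
 * expected to use the pair above. The mapping helpers are hypothetical; the
 * key points are that the possibly-retagged pointer returned by
 * kasan_unpoison_vmalloc() is the one that must be handed out, and that the
 * area is poisoned again before its pages are unmapped and freed.
 */
#if 0
static void *example_vmap_area(struct page **pages, unsigned int count)
{
	void *addr = example_map_pages(pages, count);	/* hypothetical */

	if (!addr)
		return NULL;
	return kasan_unpoison_vmalloc(addr, count * PAGE_SIZE,
				      KASAN_VMALLOC_PROT_NORMAL);
}

static void example_vunmap_area(void *addr, unsigned int count)
{
	kasan_poison_vmalloc(addr, count * PAGE_SIZE);
	example_unmap_pages(addr, count);		/* hypothetical */
}
#endif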
#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					    unsigned long size,
					    kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size,
					    gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */