/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
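/*
 * Example (illustrative only, not part of the kernel API): in the generic
 * mode, KASAN_SHADOW_SCALE_SHIFT is 3, so one shadow byte tracks an 8-byte
 * granule, and the mapping above is a plain shift-and-offset:
 *
 *	shadow = (void *)((unsigned long)addr >> 3) + KASAN_SHADOW_OFFSET;
 *
 * A generic-mode shadow byte of 0 means the whole granule is accessible, a
 * value of 1..7 means only that many leading bytes are accessible, and other
 * values encode the various kinds of poisoning. The software tag-based mode
 * stores a memory tag per granule instead, which is why its
 * KASAN_SHADOW_INIT matches KASAN_TAG_INVALID.
 */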
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
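/*
 * Example (illustrative sketch; "ctor" is the cache's constructor and the
 * surrounding code is hypothetical, not slab internals): an allocator that
 * must initialize a freshly created object, for instance by running its
 * constructor, brackets that one access with the pair above and leaves the
 * object poisoned otherwise until it is really allocated:
 *
 *	kasan_unpoison_new_object(cache, object);
 *	cache->ctor(object);			// object is accessible here
 *	kasan_poison_new_object(cache, object);
 */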
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
			bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is true
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}
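/*
 * Example (illustrative sketch of the calling convention; the real logic
 * lives in the slab allocator's free hooks): a free path first checks that
 * the free is valid, then lets KASAN poison and possibly quarantine the
 * object. A "true" return from kasan_slab_free() means KASAN now owns the
 * object and the allocator must not free it yet:
 *
 *	static bool example_free_hook(struct kmem_cache *s, void *object,
 *					bool init)
 *	{
 *		if (kasan_slab_pre_free(s, object))
 *			return false;	// double-free or invalid-free
 *		return !kasan_slab_free(s, object, init, false);
 *	}
 */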
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
					size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
					unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
					unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
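/*
 * Example (illustrative sketch, assuming a hypothetical pool that caches
 * page allocations; stash_page()/unstash_page() are stand-ins for the
 * pool's own bookkeeping): pages are poisoned while they sit in the pool
 * and unpoisoned on the way out:
 *
 *	void pool_put_page(struct page *page, unsigned int order)
 *	{
 *		// false means a bug was detected and reported; do not
 *		// recycle the page in that case.
 *		if (kasan_mempool_poison_pages(page, order))
 *			stash_page(page, order);
 *	}
 *
 *	struct page *pool_get_page(unsigned int order)
 *	{
 *		struct page *page = unstash_page(order);
 *
 *		kasan_mempool_unpoison_pages(page, order);
 *		return page;
 *	}
 */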
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
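/*
 * Example (illustrative sketch, assuming a hypothetical fixed-size element
 * pool; the struct and helpers are not a real kernel API): the pair above
 * brackets the time a slab allocation sits unused in such a pool:
 *
 *	void pool_put(struct pool *pool, void *element)
 *	{
 *		// false means a double-free or invalid-free was detected
 *		// and reported; the element must not be recycled then.
 *		if (kasan_mempool_poison_object(element))
 *			pool->elements[pool->nr++] = element;
 *	}
 *
 *	void *pool_get(struct pool *pool, size_t elem_size)
 *	{
 *		void *element = pool->elements[--pool->nr];
 *
 *		kasan_mempool_unpoison_object(element, elem_size);
 *		return element;
 *	}
 */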
/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page,
						unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif
#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is needed. */
static inline void kasan_cache_create(struct kmem_cache *cache,
					unsigned int *size,
					slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */
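/*
 * Example (illustrative sketch): kasan_record_aux_stack() lets a subsystem
 * that hands objects off to asynchronous execution record the submitter's
 * stack trace, so that a later KASAN report on the object shows it as an
 * auxiliary stack; workqueue and RCU use it this way for work items and
 * callbacks. A queueing path could look like this (add_to_queue() is a
 * hypothetical helper):
 *
 *	void example_queue_work(struct work_struct *work)
 *	{
 *		kasan_record_aux_stack(work);	// remember who queued it
 *		add_to_queue(work);
 *	}
 *
 * The _noalloc variant avoids allocating stack depot memory and is meant
 * for contexts where such allocation is unsafe.
 */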
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			unsigned long free_region_start,
			unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					unsigned long end,
					unsigned long free_region_start,
					unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
				kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					unsigned long end,
					unsigned long free_region_start,
					unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					unsigned long size,
					kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size,
						gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */