/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
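
/*
 * Illustrative sketch, not part of the original header: how
 * kasan_mem_to_shadow() above maps a kernel address to its shadow byte.
 * The concrete address and the 1:8 scaling are assumptions (the usual
 * KASAN_SHADOW_SCALE_SHIFT == 3 setup); the real constants come from
 * <asm/kasan.h>. The shadow-byte encoding described in the comment is
 * that of generic KASAN.
 */
#if 0	/* example only, never compiled */
static void kasan_shadow_mapping_example(void)
{
	void *addr = (void *)0xffff888012345678UL;	/* hypothetical address */
	u8 *shadow = kasan_mem_to_shadow(addr);

	/*
	 * Each shadow byte covers an 8-byte granule: 0 means the whole
	 * granule is accessible, 1..7 mean only the first N bytes are,
	 * and values with the top bit set mark redzones or freed memory.
	 */
	pr_info("shadow for %px is at %px, value 0x%02x\n",
		addr, shadow, *shadow);
}
#endif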

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						  unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
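
/*
 * Illustrative sketch, not part of the original header, assuming a
 * simplified slab allocator: the two hooks above bracket the short
 * window in which the allocator itself must touch an object of a
 * freshly allocated slab, for example to run a constructor. The helper
 * name and the direct use of cache->ctor are hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_init_new_slab_object(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_new_object(cache, object);
	if (cache->ctor)		/* assumes the cache has a constructor */
		cache->ctor(object);
	kasan_poison_new_object(cache, object);
}
#endif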

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is true
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}
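
/*
 * Illustrative sketch, not part of the original header, of how a slab
 * allocator's free path might consume the two hooks documented above:
 * bail out when kasan_slab_pre_free() flags a double-free/invalid-free,
 * and skip the real free when kasan_slab_free() takes ownership of the
 * object (quarantine). example_free_path() and do_real_free() are
 * hypothetical names.
 */
#if 0	/* example only, never compiled */
static void example_free_path(struct kmem_cache *s, void *object)
{
	if (kasan_slab_pre_free(s, object))
		return;		/* buggy free: KASAN already reported it */

	/* init == true: modes with integrated init also zero the object. */
	if (kasan_slab_free(s, object, true, false))
		return;		/* KASAN owns the object until later */

	do_real_free(s, object);
}
#endif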

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned with
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							  unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned with
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}
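
/*
 * Illustrative sketch, not part of the original header, assuming a
 * hypothetical one-element page cache: a subsystem that keeps a freed
 * page around for reuse poisons it while it sits in the cache and
 * unpoisons it again when handing it back out (see
 * kasan_mempool_unpoison_pages() above). All names below are made up.
 */
#if 0	/* example only, never compiled */
static struct page *cached_page;	/* hypothetical single-slot cache */

static void example_cache_put_page(struct page *page)
{
	if (cached_page) {		/* cache full: really free the page */
		__free_pages(page, 0);
		return;
	}
	if (kasan_mempool_poison_pages(page, 0))
		cached_page = page;	/* safe to keep for later reuse */
	/* else: KASAN reported a bug for this page; do not reuse it */
}

static struct page *example_cache_get_page(gfp_t gfp)
{
	struct page *page = cached_page;

	if (page) {
		cached_page = NULL;
		kasan_mempool_unpoison_pages(page, 0);
		return page;
	}
	return alloc_pages(gfp, 0);
}
#endif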

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							   size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size);
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */
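
/*
 * Illustrative sketch, not part of the original header: unlike the
 * compiler-emitted kasan_check_read/write() instrumentation,
 * kasan_check_byte() above also covers the hardware tag-based mode, so
 * non-instrumented code can use it to probe whether an address is still
 * accessible before touching it. example_ptr_is_live() is a
 * hypothetical helper name.
 */
#if 0	/* example only, never compiled */
static bool example_ptr_is_live(const void *ptr)
{
	/* Returns false (and reports the bug) if *ptr is poisoned memory. */
	return kasan_check_byte(ptr);
}
#endif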

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is needed. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif
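
/*
 * Illustrative sketch, not part of the original header: with the
 * tag-based modes, two pointers to the same object may differ in their
 * top-byte tag, so raw address comparisons should be done on untagged
 * addresses via kasan_reset_tag() (declared above). The helper name is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static bool example_same_object(const void *a, const void *b)
{
	return kasan_reset_tag(a) == kasan_reset_tag(b);
}
#endif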

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						  unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}
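
/*
 * Illustrative sketch, not part of the original header, of how a
 * vmalloc-style allocator might use the hooks above: unpoison the new
 * area with the KASAN_VMALLOC_* flags that describe how it was
 * allocated and mapped (the exact flag semantics are defined by the
 * callers in mm/vmalloc.c), and poison it again when the area is freed.
 * The function names and the flag combination are assumptions.
 */
#if 0	/* example only, never compiled */
static void *example_valloc_unpoison(struct vm_struct *area, unsigned long size)
{
	kasan_vmalloc_flags_t flags = KASAN_VMALLOC_VM_ALLOC |
				      KASAN_VMALLOC_PROT_NORMAL;

	/* May return a tagged pointer in the SW/HW tag-based modes. */
	return kasan_unpoison_vmalloc(area->addr, size, flags);
}

static void example_vfree_poison(struct vm_struct *area, unsigned long size)
{
	kasan_poison_vmalloc(area->addr, size);
}
#endif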

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					    unsigned long size,
					    kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */