/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
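
/*
 * Worked example (illustrative only): with the generic mode's typical
 * KASAN_SHADOW_SCALE_SHIFT of 3, each shadow byte covers 8 bytes of memory,
 * so a hypothetical address maps to shadow as:
 *
 *	addr   = 0xffff800012345678
 *	shadow = (addr >> 3) + KASAN_SHADOW_OFFSET
 *	       = 0x1ffff00002468acf + KASAN_SHADOW_OFFSET
 *
 * The actual scale shift and offset are mode- and architecture-specific.
 */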

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
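
/*
 * Usage sketch (illustrative only, not part of this header): code that must
 * deliberately touch memory KASAN considers poisoned can suppress reports
 * for the current task around the access; the pointer name is hypothetical:
 *
 *	kasan_disable_current();
 *	val = *possibly_poisoned_ptr;
 *	kasan_enable_current();
 */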

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab page without
 * doing anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
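
/*
 * Usage sketch (illustrative only, not part of this header): a slab
 * allocator that needs to write bookkeeping data into a just-allocated
 * object brackets the access with the pair above; init_object_metadata()
 * is a hypothetical helper:
 *
 *	kasan_unpoison_new_object(cache, object);
 *	init_object_metadata(cache, object);
 *	kasan_poison_new_object(cache, object);
 */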

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * @Return true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
			bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * @Return true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}
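
/*
 * Usage sketch (illustrative only, not part of this header): a simplified
 * slab free path first rejects unsafe frees and then lets KASAN optionally
 * quarantine the object; do_real_free() is a hypothetical helper:
 *
 *	if (kasan_slab_pre_free(s, object))
 *		return;			// double-free or invalid-free
 *	if (kasan_slab_free(s, object, init, false))
 *		return;			// KASAN took ownership (quarantine)
 *	do_real_free(s, object);
 */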

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
					void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}
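
/*
 * Usage sketch (illustrative only, not part of this header): a kmalloc-style
 * allocator typically unpoisons the whole object at allocation time and then
 * narrows the accessible region to the requested size; requested_size is a
 * hypothetical variable:
 *
 *	object = kasan_slab_alloc(s, object, flags, init);
 *	object = kasan_kmalloc(s, object, requested_size, flags);
 */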

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
					unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but for
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
					unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							  unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							   size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
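
/*
 * Usage sketch (illustrative only, not part of this header): a mempool-style
 * cache poisons elements while they are parked in the pool and unpoisons
 * them on reuse; pool_add()/pool_remove() and element_size are hypothetical:
 *
 *	// Element goes back into the pool instead of being freed.
 *	if (kasan_mempool_poison_object(element))
 *		pool_add(pool, element);
 *
 *	// Element is handed out again.
 *	element = pool_remove(pool);
 *	kasan_mempool_unpoison_object(element, element_size);
 */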

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
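
/*
 * Usage sketch (illustrative only, not part of this header): code about to
 * access memory it does not own can ask KASAN whether the first byte is
 * addressable and bail out otherwise; dump_bytes() is a hypothetical helper:
 *
 *	if (!kasan_check_byte(ptr))
 *		return;			// inaccessible, a report was printed
 *	dump_bytes(ptr, len);
 */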

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr,
						size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is needed. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/
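
/*
 * Usage sketch (illustrative only, not part of this header): with the
 * tag-based modes the top byte of a pointer carries a tag, so comparisons
 * against untagged range boundaries strip the tag first; the variables are
 * hypothetical:
 *
 *	if (kasan_reset_tag(ptr) >= range_start &&
 *	    kasan_reset_tag(ptr) < range_end)
 *		handle_in_range(ptr);
 */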

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					    unsigned long size,
					    kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */
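
/*
 * Usage sketch (illustrative only, not part of this header): a vmalloc-style
 * caller unpoisons a fresh mapping and describes it with the KASAN_VMALLOC_*
 * flags defined near the top of this file; the exact flag combination below
 * is only an assumed example:
 *
 *	addr = kasan_unpoison_vmalloc(addr, size,
 *				      KASAN_VMALLOC_VM_ALLOC |
 *				      KASAN_VMALLOC_PROT_NORMAL |
 *				      KASAN_VMALLOC_INIT);
 *	...
 *	kasan_poison_vmalloc(addr, size);	// when the mapping is torn down
 */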

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size,
						gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */