/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
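
/*
 * The wrappers below share one pattern: each kasan_*() helper checks
 * kasan_enabled() (a static key when CONFIG_KASAN_HW_TAGS is enabled, a
 * compile-time constant otherwise) and only then calls the out-of-line
 * __kasan_*() implementation.
 *
 * A minimal sketch of that pattern, assuming a hypothetical hook
 * kasan_frob_range() that is not part of this header:
 *
 *	void __kasan_frob_range(const void *addr, size_t size);
 *	static __always_inline void kasan_frob_range(const void *addr, size_t size)
 *	{
 *		if (kasan_enabled())
 *			__kasan_frob_range(addr, size);
 *	}
 */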
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
						       void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
						     void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			   unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is true
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init,
					    bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
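 *
 * An illustrative sketch only (not part of the original kernel-doc;
 * spare_page is a hypothetical per-subsystem cache slot): a caller keeping
 * one page around for reuse might do
 *
 *	if (kasan_mempool_poison_pages(page, order))
 *		spare_page = page;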
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned with
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
							unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							  unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned with
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned by
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							   size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
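 *
 * An illustrative sketch only (not part of the original comment): a helper
 * that must not touch an invalid object can probe its first byte and bail
 * out once KASAN has printed a report, e.g.
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;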
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr,
						 size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is needed. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						     unsigned long size,
						     kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						  unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */
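
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * mempool-style cache that stashes slab objects for reuse would pair the
 * kasan_mempool_*() helpers declared above roughly as follows; my_pool,
 * my_pool_put() and my_pool_get() are hypothetical. An element is only
 * cached when KASAN reports that reusing it is safe, and it is unpoisoned
 * again before being handed back to a caller.
 *
 *	static bool my_pool_put(struct my_pool *pool, void *obj)
 *	{
 *		if (!kasan_mempool_poison_object(obj))
 *			return false;
 *		pool->elements[pool->nr++] = obj;
 *		return true;
 *	}
 *
 *	static void *my_pool_get(struct my_pool *pool, size_t size)
 *	{
 *		void *obj = pool->elements[--pool->nr];
 *
 *		kasan_mempool_unpoison_object(obj, size);
 *		return obj;
 *	}
 */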