
TOMOYO Linux Cross Reference
Linux/include/linux/kasan.h

Diff markup

Differences between /include/linux/kasan.h (Architecture sparc) and /include/linux/kasan.h (Architecture alpha)

The two sides of this diff are identical: include/linux/kasan.h is a generic header shared by all architectures, so the comparison contains no changes. The listing below therefore shows the file once, without the side-by-side columns.


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE              ((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT              ((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC          ((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL       ((__force kasan_vmalloc_flags_t)0x04u)

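/*
 * Editorial sketch (not part of the original header): the flags above form a
 * bitmask and are combined with bitwise OR. A hypothetical caller setting up
 * a vmalloc mapping might do:
 *
 *      kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
 *
 *      if (<the mapping is created by the vmalloc allocator itself>)
 *              kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
 *      if (<the backing pages are zeroed on allocation>)
 *              kasan_flags |= KASAN_VMALLOC_INIT;
 *      addr = kasan_unpoison_vmalloc(addr, size, kasan_flags);
 */
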
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
                                const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
        return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                + KASAN_SHADOW_OFFSET;
}
#endif
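
/*
 * Editorial note (not part of the original header): with the generic mode's
 * typical KASAN_SHADOW_SCALE_SHIFT of 3, one shadow byte tracks an 8-byte
 * granule, i.e. shadow = (addr >> 3) + KASAN_SHADOW_OFFSET. A hypothetical
 * inspection of a granule's state:
 *
 *      s8 shadow_byte = *(s8 *)kasan_mem_to_shadow(ptr);
 *      // 0: the whole granule is accessible; 1..7: only that many leading
 *      // bytes are accessible; negative values encode a poisoned granule.
 */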

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

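/*
 * Editorial sketch (not part of the original header): a hypothetical use of
 * the pair above to suppress reports around a deliberate access to memory
 * that may be poisoned (e.g. when dumping it for debugging):
 *
 *      kasan_disable_current();
 *      <read the possibly-poisoned buffer>
 *      kasan_enable_current();
 */
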
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
        return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
                                        unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
        return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
        if (kasan_enabled())
                __kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
                                                unsigned int order, bool init)
{
        if (kasan_enabled())
                __kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
                                                 unsigned int order, bool init)
{
        if (kasan_enabled())
                return __kasan_unpoison_pages(page, order, init);
        return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
        if (kasan_enabled())
                __kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
                                                        void *object)
{
        if (kasan_enabled())
                __kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
                                                        void *object)
{
        if (kasan_enabled())
                __kasan_poison_new_object(cache, object);
}

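/*
 * Editorial sketch (not part of the original header): the intended pairing of
 * the two hooks above, as a slab allocator might apply it while writing its
 * own metadata into an otherwise poisoned object; the middle step is
 * hypothetical:
 *
 *      kasan_unpoison_new_object(cache, object);
 *      <store allocator metadata, e.g. a free-list pointer, in the object>
 *      kasan_poison_new_object(cache, object);
 */
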
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                          const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
                                struct kmem_cache *cache, const void *object)
{
        if (kasan_enabled())
                return __kasan_init_slab_obj(cache, object);
        return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
                        unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
                                                void *object)
{
        if (kasan_enabled())
                return __kasan_slab_pre_free(s, object, _RET_IP_);
        return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
                       bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs KASAN that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
                                                void *object, bool init,
                                                bool still_accessible)
{
        if (kasan_enabled())
                return __kasan_slab_free(s, object, init, still_accessible);
        return false;
}

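/*
 * Editorial sketch (not part of the original header): how a free path might
 * consult the two hooks above; the surrounding control flow is hypothetical:
 *
 *      if (kasan_slab_pre_free(s, object))
 *              return;         // unsafe free; KASAN printed a report
 *      if (kasan_slab_free(s, object, <init-on-free?>, false))
 *              return;         // KASAN took ownership (e.g. quarantine)
 *      <put the object back on the free list>
 */
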
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
        if (kasan_enabled())
                __kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
                                       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
                struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
        if (kasan_enabled())
                return __kasan_slab_alloc(s, object, flags, init);
        return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
                                    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
                                const void *object, size_t size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_kmalloc(s, object, size, flags);
        return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
                                          size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
                                                      size_t size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_kmalloc_large(ptr, size, flags);
        return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
                                     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
                                                 size_t new_size, gfp_t flags)
{
        if (kasan_enabled())
                return __kasan_krealloc(object, new_size, flags);
        return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
                                  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
                                                       unsigned int order)
{
        if (kasan_enabled())
                return __kasan_mempool_poison_pages(page, order, _RET_IP_);
        return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
                                    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
                                                         unsigned int order)
{
        if (kasan_enabled())
                __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}

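/*
 * Editorial sketch (not part of the original header): a subsystem caching
 * page allocations might pair the two hooks above like this; the cache
 * operations are hypothetical:
 *
 *      // stashing a page allocation for later reuse:
 *      if (kasan_mempool_poison_pages(page, order))
 *              <add the pages to the private cache>
 *      else
 *              <the free was buggy; do not reuse the allocation>
 *
 *      // handing a cached allocation back out:
 *      kasan_mempool_unpoison_pages(page, order);
 */
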
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
        if (kasan_enabled())
                return __kasan_mempool_poison_object(ptr, _RET_IP_);
        return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
                                                          size_t size)
{
        if (kasan_enabled())
                __kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}

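/*
 * Editorial sketch (not part of the original header): the slab-object
 * counterpart of the page hooks above, in a hypothetical mempool-style cache:
 *
 *      // element returned to the pool:
 *      if (kasan_mempool_poison_object(element))
 *              <keep the element for reuse>
 *
 *      // element taken back out of the pool:
 *      kasan_mempool_unpoison_object(element, element_size);
 */
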
/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
        if (kasan_enabled())
                return __kasan_check_byte(addr, _RET_IP_);
        return true;
}

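/*
 * Editorial sketch (not part of the original header): probing one byte before
 * touching memory that might already have been freed; the surrounding logic
 * is hypothetical:
 *
 *      if (!kasan_check_byte(ptr))
 *              return;         // poisoned; KASAN already printed a report
 *      <access *ptr>
 */
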
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
                                      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
                                        bool init)
{
        return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
                                        void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
                                        void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
                                const void *object)
{
        return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
        return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
                                   bool init, bool still_accessible)
{
        return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
                                   gfp_t flags, bool init)
{
        return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
                                size_t size, gfp_t flags)
{
        return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
                                 gfp_t flags)
{
        return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
        return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
        return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
        return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
        int alloc_meta_offset;
        int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

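/*
 * Editorial note (not part of the original header): kasan_record_aux_stack()
 * attaches an auxiliary stack trace to the object that @ptr points into, so
 * that later reports can show, for instance, where a deferred callback was
 * queued; the _noalloc variant records without allocating memory. A
 * hypothetical caller, just before queueing a work item:
 *
 *      kasan_record_aux_stack_noalloc(work);
 */
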
#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
                                                bool in_object)
{
        return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
                                      unsigned int *size,
                                      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
        return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
                bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
        return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
                                                       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
                                        unsigned long size)
{
        return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
                                         unsigned long end,
                                         unsigned long free_region_start,
                                         unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
                               kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
                                                unsigned long size,
                                                kasan_vmalloc_flags_t flags)
{
        if (kasan_enabled())
                return __kasan_unpoison_vmalloc(start, size, flags);
        return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
                                                 unsigned long size)
{
        if (kasan_enabled())
                __kasan_poison_vmalloc(start, size);
}

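/*
 * Editorial sketch (not part of the original header): a vfree()-style
 * teardown might poison the region before unmapping it; the flow is
 * hypothetical:
 *
 *      kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
 *      <unmap the pages and release the vm_struct>
 */
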
#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
                                                       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
                                        unsigned long size)
{
        return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
                                         unsigned long end,
                                         unsigned long free_region_start,
                                         unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
                                           unsigned long size,
                                           kasan_vmalloc_flags_t flags)
{
        return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
                !defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */
