TOMOYO Linux Cross Reference
Linux/mm/kmsan/hooks.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * KMSAN hooks for kernel subsystems.
  4  *
  5  * These functions handle creation of KMSAN metadata for memory allocations.
  6  *
  7  * Copyright (C) 2018-2022 Google LLC
  8  * Author: Alexander Potapenko <glider@google.com>
  9  *
 10  */
 11 
 12 #include <linux/cacheflush.h>
 13 #include <linux/dma-direction.h>
 14 #include <linux/gfp.h>
 15 #include <linux/kmsan.h>
 16 #include <linux/mm.h>
 17 #include <linux/mm_types.h>
 18 #include <linux/scatterlist.h>
 19 #include <linux/slab.h>
 20 #include <linux/uaccess.h>
 21 #include <linux/usb.h>
 22 
 23 #include "../internal.h"
 24 #include "../slab.h"
 25 #include "kmsan.h"
 26 
 27 /*
 28  * Instrumented functions shouldn't be called between kmsan_enter_runtime()
 29  * and kmsan_leave_runtime(): doing so would cause the effects of functions
 30  * like memset() inside instrumented code to be skipped.
 31  */
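
As a rough illustration of the constraint above (a hedged sketch, not part of this
file; kmsan_example_hook is hypothetical), a hook bails out when the runtime is
already active and wraps only the non-instrumented kmsan_internal_*() helpers in
the enter/leave section:

    void kmsan_example_hook(void *addr, size_t size, gfp_t flags)
    {
            /* Never nest inside the KMSAN runtime. */
            if (!kmsan_enabled || kmsan_in_runtime())
                    return;

            kmsan_enter_runtime();
            /* Only non-instrumented kmsan_internal_*() helpers are safe here. */
            kmsan_internal_poison_memory(addr, size, flags, KMSAN_POISON_NOCHECK);
            kmsan_leave_runtime();

            /*
             * Any instrumented code (e.g. a memset() of addr) must run outside
             * the enter/leave section, or its metadata updates would be skipped.
             */
    }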
 32 
 33 void kmsan_task_create(struct task_struct *task)
 34 {
 35         kmsan_enter_runtime();
 36         kmsan_internal_task_create(task);
 37         kmsan_leave_runtime();
 38 }
 39 
 40 void kmsan_task_exit(struct task_struct *task)
 41 {
 42         if (!kmsan_enabled || kmsan_in_runtime())
 43                 return;
 44 
 45         kmsan_disable_current();
 46 }
 47 
 48 void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
 49 {
 50         if (unlikely(object == NULL))
 51                 return;
 52         if (!kmsan_enabled || kmsan_in_runtime())
 53                 return;
 54         /*
 55          * There's a ctor or this is an RCU cache - do nothing. The memory
 56          * status hasn't changed since last use.
 57          */
 58         if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
 59                 return;
 60 
 61         kmsan_enter_runtime();
 62         if (flags & __GFP_ZERO)
 63                 kmsan_internal_unpoison_memory(object, s->object_size,
 64                                                KMSAN_POISON_CHECK);
 65         else
 66                 kmsan_internal_poison_memory(object, s->object_size, flags,
 67                                              KMSAN_POISON_CHECK);
 68         kmsan_leave_runtime();
 69 }
 70 
 71 void kmsan_slab_free(struct kmem_cache *s, void *object)
 72 {
 73         if (!kmsan_enabled || kmsan_in_runtime())
 74                 return;
 75 
 76         /* RCU slabs could be legally used after free within the RCU period */
 77         if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
 78                 return;
 79         /*
 80          * If there's a constructor, freed memory must remain in the same state
 81          * until the next allocation. We cannot save its state to detect
 82          * use-after-free bugs; instead we just keep it unpoisoned.
 83          */
 84         if (s->ctor)
 85                 return;
 86         kmsan_enter_runtime();
 87         kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
 88                                      KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
 89         kmsan_leave_runtime();
 90 }
 91 
 92 void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 93 {
 94         if (unlikely(ptr == NULL))
 95                 return;
 96         if (!kmsan_enabled || kmsan_in_runtime())
 97                 return;
 98         kmsan_enter_runtime();
 99         if (flags & __GFP_ZERO)
100                 kmsan_internal_unpoison_memory((void *)ptr, size,
101                                                /*checked*/ true);
102         else
103                 kmsan_internal_poison_memory((void *)ptr, size, flags,
104                                              KMSAN_POISON_CHECK);
105         kmsan_leave_runtime();
106 }
107 
108 void kmsan_kfree_large(const void *ptr)
109 {
110         struct page *page;
111 
112         if (!kmsan_enabled || kmsan_in_runtime())
113                 return;
114         kmsan_enter_runtime();
115         page = virt_to_head_page((void *)ptr);
116         KMSAN_WARN_ON(ptr != page_address(page));
117         kmsan_internal_poison_memory((void *)ptr,
118                                      page_size(page),
119                                      GFP_KERNEL,
120                                      KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
121         kmsan_leave_runtime();
122 }
123 
124 static unsigned long vmalloc_shadow(unsigned long addr)
125 {
126         return (unsigned long)kmsan_get_metadata((void *)addr,
127                                                  KMSAN_META_SHADOW);
128 }
129 
130 static unsigned long vmalloc_origin(unsigned long addr)
131 {
132         return (unsigned long)kmsan_get_metadata((void *)addr,
133                                                  KMSAN_META_ORIGIN);
134 }
135 
136 void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
137 {
138         __vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
139         __vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
140         flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
141         flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
142 }
143 
144 /*
145  * This function creates new shadow/origin pages for the physical pages mapped
146  * into the virtual memory. If those physical pages already had shadow/origin,
147  * those are ignored.
148  */
149 int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
150                              phys_addr_t phys_addr, pgprot_t prot,
151                              unsigned int page_shift)
152 {
153         gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
154         struct page *shadow, *origin;
155         unsigned long off = 0;
156         int nr, err = 0, clean = 0, mapped;
157 
158         if (!kmsan_enabled || kmsan_in_runtime())
159                 return 0;
160 
161         nr = (end - start) / PAGE_SIZE;
162         kmsan_enter_runtime();
163         for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
164                 shadow = alloc_pages(gfp_mask, 1);
165                 origin = alloc_pages(gfp_mask, 1);
166                 if (!shadow || !origin) {
167                         err = -ENOMEM;
168                         goto ret;
169                 }
170                 mapped = __vmap_pages_range_noflush(
171                         vmalloc_shadow(start + off),
172                         vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
173                         PAGE_SHIFT);
174                 if (mapped) {
175                         err = mapped;
176                         goto ret;
177                 }
178                 shadow = NULL;
179                 mapped = __vmap_pages_range_noflush(
180                         vmalloc_origin(start + off),
181                         vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
182                         PAGE_SHIFT);
183                 if (mapped) {
184                         __vunmap_range_noflush(
185                                 vmalloc_shadow(start + off),
186                                 vmalloc_shadow(start + off + PAGE_SIZE));
187                         err = mapped;
188                         goto ret;
189                 }
190                 origin = NULL;
191         }
192         /* Page mapping loop finished normally, nothing to clean up. */
193         clean = 0;
194 
195 ret:
196         if (clean > 0) {
197                 /*
198                  * Something went wrong. Clean up shadow/origin pages allocated
199                  * on the last loop iteration, then delete mappings created
200                  * during the previous iterations.
201                  */
202                 if (shadow)
203                         __free_pages(shadow, 1);
204                 if (origin)
205                         __free_pages(origin, 1);
206                 __vunmap_range_noflush(
207                         vmalloc_shadow(start),
208                         vmalloc_shadow(start + clean * PAGE_SIZE));
209                 __vunmap_range_noflush(
210                         vmalloc_origin(start),
211                         vmalloc_origin(start + clean * PAGE_SIZE));
212         }
213         flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
214         flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
215         kmsan_leave_runtime();
216         return err;
217 }
218 
219 void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
220 {
221         unsigned long v_shadow, v_origin;
222         struct page *shadow, *origin;
223         int nr;
224 
225         if (!kmsan_enabled || kmsan_in_runtime())
226                 return;
227 
228         nr = (end - start) / PAGE_SIZE;
229         kmsan_enter_runtime();
230         v_shadow = (unsigned long)vmalloc_shadow(start);
231         v_origin = (unsigned long)vmalloc_origin(start);
232         for (int i = 0; i < nr;
233              i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
234                 shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
235                 origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
236                 __vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
237                 __vunmap_range_noflush(v_origin, vmalloc_origin(end));
238                 if (shadow)
239                         __free_pages(shadow, 1);
240                 if (origin)
241                         __free_pages(origin, 1);
242         }
243         flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
244         flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
245         kmsan_leave_runtime();
246 }
247 
248 void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
249                         size_t left)
250 {
251         unsigned long ua_flags;
252 
253         if (!kmsan_enabled || kmsan_in_runtime())
254                 return;
255         /*
256          * At this point we've copied the memory already. It's hard to check it
257          * before copying, as the size of the actually copied buffer is unknown.
258          */
259 
260         /* copy_to_user() may copy zero bytes. No need to check. */
261         if (!to_copy)
262                 return;
263         /* Or maybe copy_to_user() failed to copy anything. */
264         if (to_copy <= left)
265                 return;
266 
267         ua_flags = user_access_save();
268         if (!IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) ||
269             (u64)to < TASK_SIZE) {
270                 /* This is a user memory access, check it. */
271                 kmsan_internal_check_memory((void *)from, to_copy - left, to,
272                                             REASON_COPY_TO_USER);
273         } else {
274         /*
275          * Otherwise this is a kernel memory access. This happens when a
276          * compat syscall passes an argument allocated on the kernel
277          * stack to a real syscall. Don't check anything, just copy the
278          * shadow of the copied bytes.
279          */
280                 kmsan_internal_memmove_metadata((void *)to, (void *)from,
281                                                 to_copy - left);
282         }
283         user_access_restore(ua_flags);
284 }
285 EXPORT_SYMBOL(kmsan_copy_to_user);
286 
287 void kmsan_memmove(void *to, const void *from, size_t size)
288 {
289         if (!kmsan_enabled || kmsan_in_runtime())
290                 return;
291 
292         kmsan_enter_runtime();
293         kmsan_internal_memmove_metadata(to, (void *)from, size);
294         kmsan_leave_runtime();
295 }
296 EXPORT_SYMBOL(kmsan_memmove);
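
kmsan_memmove() copies only KMSAN's metadata, which is useful when the data itself
is moved by code the compiler does not instrument. A hedged sketch (the copy
routine is hypothetical):

    /* Copy routine KMSAN cannot see, e.g. implemented in assembly. */
    void example_asm_memcpy(void *dst, const void *src, size_t len);

    static void example_opaque_copy(void *dst, const void *src, size_t len)
    {
            example_asm_memcpy(dst, src, len);
            /* Mirror the copy in shadow/origin so dst is not falsely reported. */
            kmsan_memmove(dst, src, len);
    }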
297 
298 /* Helper function to check an URB. */
299 void kmsan_handle_urb(const struct urb *urb, bool is_out)
300 {
301         if (!urb)
302                 return;
303         if (is_out)
304                 kmsan_internal_check_memory(urb->transfer_buffer,
305                                             urb->transfer_buffer_length,
306                                             /*user_addr*/ NULL,
307                                             REASON_SUBMIT_URB);
308         else
309                 kmsan_internal_unpoison_memory(urb->transfer_buffer,
310                                                urb->transfer_buffer_length,
311                                                /*checked*/ false);
312 }
313 EXPORT_SYMBOL_GPL(kmsan_handle_urb);
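
For illustration only (this call site is a hedged sketch, not quoted from the USB
core): a submission path would check OUT transfer buffers before they reach the
device and leave IN buffers to be unpoisoned once the controller fills them.

    /* Hypothetical helper; usb_pipeout() is the usual OUT/IN test. */
    static void example_note_urb_submission(struct urb *urb)
    {
            kmsan_handle_urb(urb, usb_pipeout(urb->pipe));
    }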
314 
315 static void kmsan_handle_dma_page(const void *addr, size_t size,
316                                   enum dma_data_direction dir)
317 {
318         switch (dir) {
319         case DMA_BIDIRECTIONAL:
320                 kmsan_internal_check_memory((void *)addr, size,
321                                             /*user_addr*/ NULL, REASON_ANY);
322                 kmsan_internal_unpoison_memory((void *)addr, size,
323                                                /*checked*/ false);
324                 break;
325         case DMA_TO_DEVICE:
326                 kmsan_internal_check_memory((void *)addr, size,
327                                             /*user_addr*/ NULL, REASON_ANY);
328                 break;
329         case DMA_FROM_DEVICE:
330                 kmsan_internal_unpoison_memory((void *)addr, size,
331                                                /*checked*/ false);
332                 break;
333         case DMA_NONE:
334                 break;
335         }
336 }
337 
338 /* Helper function to handle DMA data transfers. */
339 void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
340                       enum dma_data_direction dir)
341 {
342         u64 page_offset, to_go, addr;
343 
344         if (PageHighMem(page))
345                 return;
346         addr = (u64)page_address(page) + offset;
347         /*
348          * The kernel may occasionally give us adjacent DMA pages not belonging
349          * to the same allocation. Process them separately to avoid triggering
350          * internal KMSAN checks.
351          */
352         while (size > 0) {
353                 page_offset = offset_in_page(addr);
354                 to_go = min(PAGE_SIZE - page_offset, (u64)size);
355                 kmsan_handle_dma_page((void *)addr, to_go, dir);
356                 addr += to_go;
357                 size -= to_go;
358         }
359 }
360 
361 void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
362                          enum dma_data_direction dir)
363 {
364         struct scatterlist *item;
365         int i;
366 
367         for_each_sg(sg, item, nents, i)
368                 kmsan_handle_dma(sg_page(item), item->offset, item->length,
369                                  dir);
370 }
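
For illustration (a hedged sketch; the function below is hypothetical and not the
real DMA-mapping code), a streaming-DMA map path would call kmsan_handle_dma()
right before the buffer is handed to the device, so TO_DEVICE/BIDIRECTIONAL data
is checked and FROM_DEVICE/BIDIRECTIONAL data is marked initialized:

    static void example_map_for_device(struct page *page, size_t offset,
                                       size_t size, enum dma_data_direction dir)
    {
            /* Check/unpoison the buffer according to the transfer direction. */
            kmsan_handle_dma(page, offset, size, dir);
    }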
371 
372 /* Functions from kmsan-checks.h follow. */
373 
374 /*
375  * To create an origin, kmsan_poison_memory() unwinds the stack and stores
376  * the trace in the stack depot. This may cause deadlocks if done from within
377  * the KMSAN runtime; therefore we bail out if kmsan_in_runtime().
378  */
379 void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
380 {
381         if (!kmsan_enabled || kmsan_in_runtime())
382                 return;
383         kmsan_enter_runtime();
384         /* The users may want to poison/unpoison random memory. */
385         kmsan_internal_poison_memory((void *)address, size, flags,
386                                      KMSAN_POISON_NOCHECK);
387         kmsan_leave_runtime();
388 }
389 EXPORT_SYMBOL(kmsan_poison_memory);
390 
391 /*
392  * Unlike kmsan_poison_memory(), this function can be used from within KMSAN
393  * runtime, because it does not trigger allocations or call instrumented code.
394  */
395 void kmsan_unpoison_memory(const void *address, size_t size)
396 {
397         unsigned long ua_flags;
398 
399         if (!kmsan_enabled)
400                 return;
401 
402         ua_flags = user_access_save();
403         /* The users may want to poison/unpoison random memory. */
404         kmsan_internal_unpoison_memory((void *)address, size,
405                                        KMSAN_POISON_NOCHECK);
406         user_access_restore(ua_flags);
407 }
408 EXPORT_SYMBOL(kmsan_unpoison_memory);
409 
410 /*
411  * Version of kmsan_unpoison_memory() called from IRQ entry functions.
412  */
413 void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
414 {
415         kmsan_unpoison_memory((void *)regs, sizeof(*regs));
416 }
417 
418 void kmsan_check_memory(const void *addr, size_t size)
419 {
420         if (!kmsan_enabled)
421                 return;
422         return kmsan_internal_check_memory((void *)addr, size,
423                                            /*user_addr*/ NULL, REASON_ANY);
424 }
425 EXPORT_SYMBOL(kmsan_check_memory);
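
Together with kmsan_poison_memory() and kmsan_unpoison_memory() above, this is the
small annotation API from <linux/kmsan-checks.h>. A hedged usage sketch (the
helpers and buffer are made up for illustration):

    #include <linux/kmsan-checks.h>

    /* Device filled buf by DMA: tell KMSAN the bytes are now initialized. */
    static void example_rx_complete(void *buf, size_t len)
    {
            kmsan_unpoison_memory(buf, len);
    }

    /* Report any uninitialized bytes before they are sent out of the kernel. */
    static void example_tx_submit(const void *buf, size_t len)
    {
            kmsan_check_memory(buf, len);
    }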
426 
427 void kmsan_enable_current(void)
428 {
429         KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
430         current->kmsan_ctx.depth--;
431 }
432 EXPORT_SYMBOL(kmsan_enable_current);
433 
434 void kmsan_disable_current(void)
435 {
436         current->kmsan_ctx.depth++;
437         KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
438 }
439 EXPORT_SYMBOL(kmsan_disable_current);
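
kmsan_disable_current() and kmsan_enable_current() adjust a per-task depth counter,
so they must be used as a balanced pair around code that would otherwise generate
false positive reports. A minimal hedged sketch (the surrounding function is
hypothetical):

    static void example_noisy_region(void)
    {
            kmsan_disable_current();        /* reports suppressed for this task */
            /* ... code KMSAN would otherwise complain about ... */
            kmsan_enable_current();         /* must balance the disable above */
    }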
440 
