// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for KMSAN.
 * Each test case checks the presence (or absence) of generated reports.
 * Relies on the 'console' tracepoint to capture reports as they appear in the
 * kernel log.
 *
 * Copyright (C) 2021-2022, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <kunit/test.h>
#include "kmsan.h"

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

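/* Per-CPU variable used by test_percpu_propagate(). */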
static DEFINE_PER_CPU(int, per_cpu_var);

/* Report as observed from console. */
static struct {
        spinlock_t lock;
        bool available;
        bool ignore; /* Stop console output collection. */
        char header[256];
} observed = {
        .lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
        unsigned long flags;

        if (observed.ignore)
                return;
        spin_lock_irqsave(&observed.lock, flags);

        if (strnstr(buf, "BUG: KMSAN: ", len)) {
                /*
                 * This is a KMSAN report related to the test.
                 *
                 * The provided @buf is not NUL-terminated; copy no more than
                 * @len bytes and let strscpy() add the missing NUL-terminator.
                 */
                strscpy(observed.header, buf,
                        min(len + 1, sizeof(observed.header)));
                WRITE_ONCE(observed.available, true);
                observed.ignore = true;
        }
        spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
        return READ_ONCE(observed.available);
}

/* Reset observed.available, so that the test can trigger another report. */
static void report_reset(void)
{
        unsigned long flags;

        spin_lock_irqsave(&observed.lock, flags);
        WRITE_ONCE(observed.available, false);
        observed.ignore = false;
        spin_unlock_irqrestore(&observed.lock, flags);
}

/* Information we expect in a report. */
struct expect_report {
        const char *error_type; /* Error type. */
        /*
         * Kernel symbol from the error header, or NULL if no report is
         * expected.
         */
        const char *symbol;
};

/* Check that the observed report matches the information in @r. */
static bool report_matches(const struct expect_report *r)
{
        typeof(observed.header) expected_header;
        unsigned long flags;
        bool ret = false;
        const char *end;
        char *cur;

        /* Double-checked locking. */
        if (!report_available() || !r->symbol)
                return (!report_available() && !r->symbol);

        /* Generate expected report contents. */

        /* Title */
        cur = expected_header;
        end = &expected_header[sizeof(expected_header) - 1];

        cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type);

        scnprintf(cur, end - cur, " in %s", r->symbol);
        /* The exact offset won't match, remove it; also strip module name. */
        cur = strchr(expected_header, '+');
        if (cur)
                *cur = '\0';

        spin_lock_irqsave(&observed.lock, flags);
        if (!report_available())
                goto out; /* A new report is being captured. */

        /* Finally match expected output to what we actually observed. */
        ret = strstr(observed.header, expected_header);
out:
        spin_unlock_irqrestore(&observed.lock, flags);

        return ret;
}

/* ===== Test cases ===== */

/* Prevent replacing branch with select in LLVM. */
static noinline void check_true(char *arg)
{
        pr_info("%s is true\n", arg);
}

static noinline void check_false(char *arg)
{
        pr_info("%s is false\n", arg);
}

#define USE(x)                           \
        do {                             \
                if (x)                   \
                        check_true(#x);  \
                else                     \
                        check_false(#x); \
        } while (0)

#define EXPECTATION_ETYPE_FN(e, reason, fn) \
        struct expect_report e = {          \
                .error_type = reason,       \
                .symbol = fn,               \
        }

#define EXPECTATION_NO_REPORT(e) EXPECTATION_ETYPE_FN(e, NULL, NULL)
#define EXPECTATION_UNINIT_VALUE_FN(e, fn) \
        EXPECTATION_ETYPE_FN(e, "uninit-value", fn)
#define EXPECTATION_UNINIT_VALUE(e) EXPECTATION_UNINIT_VALUE_FN(e, __func__)
#define EXPECTATION_USE_AFTER_FREE(e) \
        EXPECTATION_ETYPE_FN(e, "use-after-free", __func__)

/* Test case: ensure that kmalloc() returns uninitialized memory. */
static void test_uninit_kmalloc(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        int *ptr;

        kunit_info(test, "uninitialized kmalloc test (UMR report)\n");
        ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
        USE(*ptr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that kmalloc'ed memory becomes initialized after memset().
 */
static void test_init_kmalloc(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        int *ptr;

        kunit_info(test, "initialized kmalloc test (no reports)\n");
        ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
        memset(ptr, 0, sizeof(*ptr));
        USE(*ptr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that kzalloc() returns initialized memory. */
static void test_init_kzalloc(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        int *ptr;

        kunit_info(test, "initialized kzalloc test (no reports)\n");
        ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
        USE(*ptr);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that local variables are uninitialized by default. */
static void test_uninit_stack_var(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        volatile int cond;

        kunit_info(test, "uninitialized stack variable (UMR report)\n");
        USE(cond);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that local variables with initializers are initialized. */
static void test_init_stack_var(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        volatile int cond = 1;

        kunit_info(test, "initialized stack variable (no reports)\n");
        USE(cond);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

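/*
 * Helpers for test_params(): the parameters are consumed via USE(), so an
 * uninitialized argument should trigger a report.
 */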
static noinline void two_param_fn_2(int arg1, int arg2)
{
        USE(arg1);
        USE(arg2);
}

static noinline void one_param_fn(int arg)
{
        two_param_fn_2(arg, arg);
        USE(arg);
}

static noinline void two_param_fn(int arg1, int arg2)
{
        int init = 0;

        one_param_fn(init);
        USE(arg1);
        USE(arg2);
}

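/*
 * Test case: ensure that an uninitialized value passed as a function parameter
 * is reported.
 */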
static void test_params(struct kunit *test)
{
#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
        /*
         * With eager param/retval checking enabled, KMSAN will report an error
         * before the call to two_param_fn().
         */
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_params");
#else
        EXPECTATION_UNINIT_VALUE_FN(expect, "two_param_fn");
#endif
        volatile int uninit, init = 1;

        kunit_info(test,
                   "uninit passed through a function parameter (UMR report)\n");
        two_param_fn(uninit, init);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

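/* Helper for test_uninit_multiple_params(): sums its three arguments. */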
static int signed_sum3(int a, int b, int c)
{
        return a + b + c;
}

/*
 * Test case: ensure that uninitialized values are tracked through function
 * arguments.
 */
static void test_uninit_multiple_params(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        volatile char b = 3, c;
        volatile int a;

        kunit_info(test, "uninitialized local passed to fn (UMR report)\n");
        USE(signed_sum3(a, b, c));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Helper function to make an array uninitialized. */
static noinline void do_uninit_local_array(char *array, int start, int stop)
{
        volatile char uninit;

        for (int i = start; i < stop; i++)
                array[i] = uninit;
}

/*
 * Test case: ensure kmsan_check_memory() reports an error when checking
 * uninitialized memory.
 */
static void test_uninit_kmsan_check_memory(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmsan_check_memory");
        volatile char local_array[8];

        kunit_info(
                test,
                "kmsan_check_memory() called on uninit local (UMR report)\n");
        do_uninit_local_array((char *)local_array, 5, 7);

        kmsan_check_memory((char *)local_array, 8);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: check that a virtual memory range created with vmap() from
 * initialized pages is still considered initialized.
 */
static void test_init_kmsan_vmap_vunmap(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        const int npages = 2;
        struct page **pages;
        void *vbuf;

        kunit_info(test, "pages initialized via vmap (no reports)\n");

        pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        for (int i = 0; i < npages; i++)
                pages[i] = alloc_page(GFP_KERNEL);
        vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
        memset(vbuf, 0xfe, npages * PAGE_SIZE);
        for (int i = 0; i < npages; i++)
                kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);

        if (vbuf)
                vunmap(vbuf);
        for (int i = 0; i < npages; i++) {
                if (pages[i])
                        __free_page(pages[i]);
        }
        kfree(pages);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memset() can initialize a buffer allocated via
 * vmalloc().
 */
static void test_init_vmalloc(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        int npages = 8;
        char *buf;

        kunit_info(test, "vmalloc buffer can be initialized (no reports)\n");
        buf = vmalloc(PAGE_SIZE * npages);
        buf[0] = 1;
        memset(buf, 0xfe, PAGE_SIZE * npages);
        USE(buf[0]);
        for (int i = 0; i < npages; i++)
                kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE);
        vfree(buf);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test case: ensure that use-after-free reporting works. */
static void test_uaf(struct kunit *test)
{
        EXPECTATION_USE_AFTER_FREE(expect);
        volatile int value;
        volatile int *var;

        kunit_info(test, "use-after-free in kmalloc-ed buffer (UMR report)\n");
        var = kmalloc(80, GFP_KERNEL);
        var[3] = 0xfeedface;
        kfree((int *)var);
        /* Copy the invalid value before checking it. */
        value = var[3];
        USE(value);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that uninitialized values are propagated through per-CPU
 * memory.
 */
static void test_percpu_propagate(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE(expect);
        volatile int uninit, check;

        kunit_info(test,
                   "uninit local stored to per_cpu memory (UMR report)\n");

        this_cpu_write(per_cpu_var, uninit);
        check = this_cpu_read(per_cpu_var);
        USE(check);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that passing uninitialized values to printk() leads to an
 * error report.
 */
static void test_printk(struct kunit *test)
{
#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
        /*
         * With eager param/retval checking enabled, KMSAN will report an error
         * before the call to pr_info().
         */
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_printk");
#else
        EXPECTATION_UNINIT_VALUE_FN(expect, "number");
#endif
        volatile int uninit;

        kunit_info(test, "uninit local passed to pr_info() (UMR report)\n");
        pr_info("%px contains %d\n", &uninit, uninit);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Prevent the compiler from inlining a memcpy() call. */
static noinline void *memcpy_noinline(volatile void *dst,
                                      const volatile void *src, size_t size)
{
        return memcpy((void *)dst, (const void *)src, size);
}

/* Test case: ensure that memcpy() correctly copies initialized values. */
static void test_init_memcpy(struct kunit *test)
{
        EXPECTATION_NO_REPORT(expect);
        volatile long long src;
        volatile long long dst = 0;

        src = 1;
        kunit_info(
                test,
                "memcpy()ing aligned initialized src to aligned dst (no reports)\n");
        memcpy_noinline((void *)&dst, (void *)&src, sizeof(src));
        kmsan_check_memory((void *)&dst, sizeof(dst));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and `dst`.
 */
static void test_memcpy_aligned_to_aligned(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_aligned");
        volatile int uninit_src;
        volatile int dst = 0;

        kunit_info(
                test,
                "memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
        memcpy_noinline((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
        kmsan_check_memory((void *)&dst, sizeof(dst));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that memcpy() correctly copies uninitialized values between
 * aligned `src` and unaligned `dst`.
 *
 * Copying an aligned 4-byte value to an unaligned destination touches two
 * aligned 4-byte values. This test case checks that KMSAN correctly reports an
 * error on both of those values.
 */
static void test_memcpy_aligned_to_unaligned(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned");
        volatile int uninit_src;
        volatile char dst[8] = { 0 };

        kunit_info(
                test,
                "memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
        kmsan_check_memory((void *)&uninit_src, sizeof(uninit_src));
        memcpy_noinline((void *)&dst[1], (void *)&uninit_src,
                        sizeof(uninit_src));
        kmsan_check_memory((void *)dst, 4);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        report_reset();
        kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that origin slots do not accidentally get overwritten with
 * zeroes during memcpy().
 *
 * Previously, when copying memory from an aligned buffer to an unaligned one,
 * if there were zero origins corresponding to zero shadow values in the source
 * buffer, they could have ended up being copied to nonzero shadow values in the
 * destination buffer:
 *
 *  memcpy(0xffff888080a00000, 0xffff888080900002, 8)
 *
 *  src (0xffff888080900002): ..xx .... xx..
 *  src origins:              o111 0000 o222
 *  dst (0xffff888080a00000): xx.. ..xx
 *  dst origins:              o111 0000
 *                        (or 0000 o222)
 * (here . stands for an initialized byte, and x for an uninitialized one).
 *
 * Ensure that this does not happen anymore, and for both destination bytes
 * the origin is nonzero (i.e. KMSAN reports an error).
 */
static void test_memcpy_initialized_gap(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_initialized_gap");
        volatile char uninit_src[12];
        volatile char dst[8] = { 0 };

        kunit_info(
                test,
                "unaligned 4-byte initialized value gets a nonzero origin after memcpy() - (2 UMR reports)\n");

        uninit_src[0] = 42;
        uninit_src[1] = 42;
        uninit_src[4] = 42;
        uninit_src[5] = 42;
        uninit_src[6] = 42;
        uninit_src[7] = 42;
        uninit_src[10] = 42;
        uninit_src[11] = 42;
        memcpy_noinline((void *)&dst[0], (void *)&uninit_src[2], 8);

        kmsan_check_memory((void *)&dst[0], 4);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
        report_reset();
        kmsan_check_memory((void *)&dst[2], 4);
        KUNIT_EXPECT_FALSE(test, report_matches(&expect));
        report_reset();
        kmsan_check_memory((void *)&dst[4], 4);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Generate test cases for memset16(), memset32(), memset64(). */
#define DEFINE_TEST_MEMSETXX(size)                                          \
        static void test_memset##size(struct kunit *test)                   \
        {                                                                   \
                EXPECTATION_NO_REPORT(expect);                              \
                volatile uint##size##_t uninit;                             \
                                                                            \
                kunit_info(test,                                            \
                           "memset" #size "() should initialize memory\n"); \
                memset##size((uint##size##_t *)&uninit, 0, 1);              \
                kmsan_check_memory((void *)&uninit, sizeof(uninit));        \
                KUNIT_EXPECT_TRUE(test, report_matches(&expect));           \
        }

DEFINE_TEST_MEMSETXX(16)
DEFINE_TEST_MEMSETXX(32)
DEFINE_TEST_MEMSETXX(64)

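/*
 * Helper for test_long_origin_chain(): every recursive step stores a value
 * derived from the (possibly uninitialized) previous elements, growing the
 * origin chain.
 */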
static noinline void fibonacci(int *array, int size, int start)
{
        if (start < 2 || (start == size))
                return;
        array[start] = array[start - 1] + array[start - 2];
        fibonacci(array, size, start + 1);
}

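/*
 * Test case: ensure that an origin chain exceeding KMSAN_MAX_ORIGIN_DEPTH
 * still results in a report.
 */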
static void test_long_origin_chain(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_long_origin_chain");
        /* (KMSAN_MAX_ORIGIN_DEPTH * 2) recursive calls to fibonacci(). */
        volatile int accum[KMSAN_MAX_ORIGIN_DEPTH * 2 + 2];
        int last = ARRAY_SIZE(accum) - 1;

        kunit_info(
                test,
                "origin chain exceeding KMSAN_MAX_ORIGIN_DEPTH (UMR report)\n");
        /*
         * We do not set accum[1] to 0, so the uninitializedness will be carried
         * over to accum[2..last].
         */
        accum[0] = 1;
        fibonacci((int *)accum, ARRAY_SIZE(accum), 2);
        kmsan_check_memory((void *)&accum[last], sizeof(int));
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that saving/restoring/printing stacks to/from stackdepot
 * does not trigger errors.
 *
 * KMSAN uses stackdepot to store origin stack traces, which is why we do not
 * instrument lib/stackdepot.c. Yet it must properly mark its outputs as
 * initialized because other kernel features (e.g. netdev tracker) may also
 * access stackdepot from instrumented code.
 */
static void test_stackdepot_roundtrip(struct kunit *test)
{
        unsigned long src_entries[16], *dst_entries;
        unsigned int src_nentries, dst_nentries;
        EXPECTATION_NO_REPORT(expect);
        depot_stack_handle_t handle;

        kunit_info(test, "testing stackdepot roundtrip (no reports)\n");

        src_nentries =
                stack_trace_save(src_entries, ARRAY_SIZE(src_entries), 1);
        handle = stack_depot_save(src_entries, src_nentries, GFP_KERNEL);
        stack_depot_print(handle);
        dst_nentries = stack_depot_fetch(handle, &dst_entries);
        KUNIT_EXPECT_TRUE(test, src_nentries == dst_nentries);

        kmsan_check_memory((void *)dst_entries,
                           sizeof(*dst_entries) * dst_nentries);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * Test case: ensure that kmsan_unpoison_memory() and the instrumentation work
 * the same.
 */
static void test_unpoison_memory(struct kunit *test)
{
        EXPECTATION_UNINIT_VALUE_FN(expect, "test_unpoison_memory");
        volatile char a[4], b[4];

        kunit_info(
                test,
                "unpoisoning via the instrumentation vs. kmsan_unpoison_memory() (2 UMR reports)\n");

        /* Initialize a[0] and check a[1]--a[3]. */
        a[0] = 0;
        kmsan_check_memory((char *)&a[1], 3);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));

        report_reset();

        /* Initialize b[0] and check b[1]--b[3]. */
        kmsan_unpoison_memory((char *)&b[0], 1);
        kmsan_check_memory((char *)&b[1], 3);
        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static struct kunit_case kmsan_test_cases[] = {
        KUNIT_CASE(test_uninit_kmalloc),
        KUNIT_CASE(test_init_kmalloc),
        KUNIT_CASE(test_init_kzalloc),
        KUNIT_CASE(test_uninit_stack_var),
        KUNIT_CASE(test_init_stack_var),
        KUNIT_CASE(test_params),
        KUNIT_CASE(test_uninit_multiple_params),
        KUNIT_CASE(test_uninit_kmsan_check_memory),
        KUNIT_CASE(test_init_kmsan_vmap_vunmap),
        KUNIT_CASE(test_init_vmalloc),
        KUNIT_CASE(test_uaf),
        KUNIT_CASE(test_percpu_propagate),
        KUNIT_CASE(test_printk),
        KUNIT_CASE(test_init_memcpy),
        KUNIT_CASE(test_memcpy_aligned_to_aligned),
        KUNIT_CASE(test_memcpy_aligned_to_unaligned),
        KUNIT_CASE(test_memcpy_initialized_gap),
        KUNIT_CASE(test_memset16),
        KUNIT_CASE(test_memset32),
        KUNIT_CASE(test_memset64),
        KUNIT_CASE(test_long_origin_chain),
        KUNIT_CASE(test_stackdepot_roundtrip),
        KUNIT_CASE(test_unpoison_memory),
        {},
};

/* ===== End test cases ===== */

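/* Per-test setup: reset the report observed from the console. */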
static int test_init(struct kunit *test)
{
        unsigned long flags;

        spin_lock_irqsave(&observed.lock, flags);
        observed.header[0] = '\0';
        observed.ignore = false;
        observed.available = false;
        spin_unlock_irqrestore(&observed.lock, flags);

        return 0;
}

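/* Per-test teardown: nothing to do, the state is reset in test_init(). */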
static void test_exit(struct kunit *test)
{
}

static int orig_panic_on_kmsan;

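/*
 * Suite setup: hook the 'console' tracepoint to capture reports and disable
 * panic_on_kmsan so that reports do not abort the test run.
 */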
static int kmsan_suite_init(struct kunit_suite *suite)
{
        register_trace_console(probe_console, NULL);
        orig_panic_on_kmsan = panic_on_kmsan;
        panic_on_kmsan = 0;
        return 0;
}

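/* Suite teardown: unhook the console tracepoint and restore panic_on_kmsan. */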
static void kmsan_suite_exit(struct kunit_suite *suite)
{
        unregister_trace_console(probe_console, NULL);
        tracepoint_synchronize_unregister();
        panic_on_kmsan = orig_panic_on_kmsan;
}

static struct kunit_suite kmsan_test_suite = {
        .name = "kmsan",
        .test_cases = kmsan_test_cases,
        .init = test_init,
        .exit = test_exit,
        .suite_init = kmsan_suite_init,
        .suite_exit = kmsan_suite_exit,
};
kunit_test_suites(&kmsan_test_suite);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>");