// SPDX-License-Identifier: GPL-2.0
/*
 * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
 * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
 *
 * For corner cases with UBSAN, try testing with:
 *
 * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *	--kconfig_add CONFIG_FORTIFY_SOURCE=y \
 *	--kconfig_add CONFIG_UBSAN=y \
 *	--kconfig_add CONFIG_UBSAN_TRAP=y \
 *	--kconfig_add CONFIG_UBSAN_BOUNDS=y \
 *	--kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
 *	--make_options LLVM=1 fortify
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* We don't need to fill dmesg with the fortify WARNs during testing. */
#ifdef DEBUG
# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
# define FORTIFY_WARN_KUNIT(x...)   WARN_ONCE(x)
#else
# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
# define FORTIFY_WARN_KUNIT(x...)   do { } while (0)
#endif

/* Redefine fortify_panic() to track failures. */
void fortify_add_kunit_error(int write);
#define fortify_panic(func, write, avail, size, retfail) do {		\
	FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size);	\
	fortify_add_kunit_error(write);					\
	return (retfail);						\
} while (0)

/* Redefine fortify_warn_once() to track memcpy() failures. */
#define fortify_warn_once(chk_func, x...) do {				\
	bool __result = chk_func;					\
	FORTIFY_WARN_KUNIT(__result, x);				\
	if (__result)							\
		fortify_add_kunit_error(1);				\
} while (0)

#include <kunit/device.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Handle being built without CONFIG_FORTIFY_SOURCE */
#ifndef __compiletime_strlen
# define __compiletime_strlen __builtin_strlen
#endif

static struct kunit_resource read_resource;
static struct kunit_resource write_resource;
static int fortify_read_overflows;
static int fortify_write_overflows;

static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change this";

void fortify_add_kunit_error(int write)
{
	struct kunit_resource *resource;
	struct kunit *current_test;

	current_test = kunit_get_current_test();
	if (!current_test)
		return;

	resource = kunit_find_named_resource(current_test,
			write ? "fortify_write_overflows"
			      : "fortify_read_overflows");
	if (!resource)
		return;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
}

static void fortify_test_known_sizes(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);

	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
	/* Externally defined and dynamically sized string pointer: */
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}

/* This is volatile so the optimizer can't perform dead code elimination. */
static volatile int pick;

/* Not inline to keep optimizer from figuring out which string is used. */
static noinline size_t want_minus_one(int pick)
{
	const char *str;

	switch (pick) {
	case 1:
		str = "4444";
		break;
	case 2:
		str = "333";
		break;
	default:
		str = "1";
		break;
	}
	return __compiletime_strlen(str);
}

static void fortify_test_control_flow_split(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}

#define KUNIT_EXPECT_BOS(test, p, expected, name)			\
	KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1),		\
		expected,						\
		"__alloc_size() not working with __bos on " name "\n")

#if !__has_builtin(__builtin_dynamic_object_size)
#define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
	/* Silence "unused variable 'expected'" warning. */		\
	KUNIT_EXPECT_EQ(test, expected, expected)
#else
#define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
	KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1),	\
		expected,						\
		"__alloc_size() not working with __bdos on " name "\n")
#endif

/* If the expected size is a constant value, __bos can see it. */
#define check_const(_expected, alloc, free)		do {		\
	size_t expected = (_expected);					\
	void *p = alloc;						\
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
	KUNIT_EXPECT_BOS(test, p, expected, #alloc);			\
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
	free;								\
} while (0)

/* If the expected size is NOT a constant value, only __bdos can see it. */
#define check_dynamic(_expected, alloc, free)		do {		\
	size_t expected = (_expected);					\
	void *p = alloc;						\
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
	KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc);			\
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
	free;								\
} while (0)

/* Assortment of constant-value kinda-edge cases. */
#define CONST_TEST_BODY(TEST_alloc)	do {				\
	/* Special-case vmalloc()-family to skip 0-sized allocations. */ \
	if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0)			\
		TEST_alloc(check_const, 0, 0);				\
	TEST_alloc(check_const, 1, 1);					\
	TEST_alloc(check_const, 128, 128);				\
	TEST_alloc(check_const, 1023, 1023);				\
	TEST_alloc(check_const, 1025, 1025);				\
	TEST_alloc(check_const, 4096, 4096);				\
	TEST_alloc(check_const, 4097, 4097);				\
} while (0)

static volatile size_t zero_size;
static volatile size_t unknown_size = 50;

#if !__has_builtin(__builtin_dynamic_object_size)
#define DYNAMIC_TEST_BODY(TEST_alloc)					\
	kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
#else
#define DYNAMIC_TEST_BODY(TEST_alloc)	do {				\
	size_t size = unknown_size;					\
									\
	/*								\
	 * Expected size is "size" in each test, before it is then	\
	 * internally incremented in each test. Requires we disable	\
	 * -Wunsequenced.						\
	 */								\
	TEST_alloc(check_dynamic, size, size++);			\
	/* Make sure incrementing actually happened. */			\
	KUNIT_EXPECT_NE(test, size, unknown_size);			\
} while (0)
#endif

#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator)				\
static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
{									\
	CONST_TEST_BODY(TEST_##allocator);				\
}									\
static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
{									\
	DYNAMIC_TEST_BODY(TEST_##allocator);				\
}

#define TEST_kmalloc(checker, expected_size, alloc_size)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	void *orig;							\
	size_t len;							\
									\
	checker(expected_size, kmalloc(alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kzalloc(alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kzalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kcalloc(1, alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size, kcalloc(alloc_size, 1, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size,						\
		kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kmalloc_array(1, alloc_size, gfp),	\
		kfree(p));						\
	checker(expected_size, kmalloc_array(alloc_size, 1, gfp),	\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE),	\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE),	\
		kfree(p));						\
									\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc(orig, (alloc_size) * 2, gfp),			\
		kfree(p));						\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc_array(orig, 1, (alloc_size) * 2, gfp),		\
		kfree(p));						\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc_array(orig, (alloc_size) * 2, 1, gfp),		\
		kfree(p));						\
									\
	len = 11;							\
	/* Using memdup() with fixed size, so keep length non-constant. */ \
	if (!__builtin_constant_p(expected_size))			\
		len += zero_size;					\
	checker(len, kmemdup("hello there", len, gfp), kfree(p));	\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)

/* Sizes are in pages, not bytes. */
#define TEST_vmalloc(checker, expected_pages, alloc_pages)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	checker((expected_pages) * PAGE_SIZE,				\
		vmalloc((alloc_pages) * PAGE_SIZE),	   vfree(p));	\
	checker((expected_pages) * PAGE_SIZE,				\
		vzalloc((alloc_pages) * PAGE_SIZE),	   vfree(p));	\
	checker((expected_pages) * PAGE_SIZE,				\
		__vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p));	\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)

/* Sizes are in pages (and open-coded for side-effects), not bytes. */
#define TEST_kvmalloc(checker, expected_pages, alloc_pages)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	size_t prev_size;						\
	void *orig;							\
									\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc((alloc_pages) * PAGE_SIZE, gfp),		\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvzalloc((alloc_pages) * PAGE_SIZE, gfp),		\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp),		\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp),		\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp),	\
		kvfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp),	\
		kvfree(p));						\
									\
	prev_size = (expected_pages) * PAGE_SIZE;			\
	orig = kvmalloc(prev_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker(((expected_pages) * PAGE_SIZE) * 2,			\
		kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp),	\
		kvfree(p));						\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)

#define TEST_devm_kmalloc(checker, expected_size, alloc_size)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	const char dev_name[] = "fortify-test";				\
	struct device *dev;						\
	void *orig;							\
	size_t len;							\
									\
	/* Create dummy device for devm_kmalloc()-family tests. */	\
	dev = kunit_device_register(test, dev_name);			\
	KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),			\
			       "Cannot register test device\n");	\
									\
	checker(expected_size, devm_kmalloc(dev, alloc_size, gfp),	\
		devm_kfree(dev, p));					\
	checker(expected_size, devm_kzalloc(dev, alloc_size, gfp),	\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kmalloc_array(dev, 1, alloc_size, gfp),		\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kmalloc_array(dev, alloc_size, 1, gfp),		\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kcalloc(dev, 1, alloc_size, gfp),			\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kcalloc(dev, alloc_size, 1, gfp),			\
		devm_kfree(dev, p));					\
									\
	orig = devm_kmalloc(dev, alloc_size, gfp);			\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		devm_krealloc(dev, orig, (alloc_size) * 2, gfp),	\
		devm_kfree(dev, p));					\
									\
	len = 4;							\
	/* Using memdup() with fixed size, so keep length non-constant. */ \
	if (!__builtin_constant_p(expected_size))			\
		len += zero_size;					\
	checker(len, devm_kmemdup(dev, "Ohai", len, gfp),		\
		devm_kfree(dev, p));					\
									\
	kunit_device_unregister(test, dev);				\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)

static const char * const test_strs[] = {
	"",
	"Hello there",
	"A longer string, just for variety",
};

#define TEST_realloc(checker)	do {					\
	gfp_t gfp = GFP_KERNEL;						\
	size_t len;							\
	int i;								\
									\
	for (i = 0; i < ARRAY_SIZE(test_strs); i++) {			\
		len = strlen(test_strs[i]);				\
		KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0);	\
		checker(len, kmemdup_array(test_strs[i], len, 1, gfp),	\
			kfree(p));					\
		checker(len, kmemdup(test_strs[i], len, gfp),		\
			kfree(p));					\
	}								\
} while (0)
static void fortify_test_realloc_size(struct kunit *test)
{
	TEST_realloc(check_dynamic);
}

/*
 * We can't have an array at the end of a structure or else
 * builds without -fstrict-flex-arrays=3 will report them as
 * being an unknown length. Additionally, add bytes before
 * and after the string to catch over/underflows, if tests
 * fail.
 */
struct fortify_padding {
	unsigned long bytes_before;
	char buf[32];
	unsigned long bytes_after;
};
/* Force compiler into not being able to resolve size at compile-time. */
static volatile int unconst;

static void fortify_test_strlen(struct kunit *test)
{
	struct fortify_padding pad = { };
	int i, end = sizeof(pad.buf) - 1;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(pad.buf) - 1; i++)
		pad.buf[i] = i + '0';
	/* Trailing bytes are still %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* String is terminated, so strlen() is well-defined. */
	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Make string unterminated, and recount. */
	pad.buf[end] = 'A';
	end = sizeof(pad.buf);
	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
}

static void fortify_test_strnlen(struct kunit *test)
{
	struct fortify_padding pad = { };
	int i, end = sizeof(pad.buf) - 1;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(pad.buf) - 1; i++)
		pad.buf[i] = i + '0';
	/* Trailing bytes are still %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* String is terminated, so strnlen() is well-defined. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* A truncated strnlen() will be safe, too. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
					sizeof(pad.buf) / 2);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Make string unterminated, and recount. */
	pad.buf[end] = 'A';
	end = sizeof(pad.buf);
	/* Reading beyond with strnlen() will fail. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

	/* Early-truncated is safe still, though. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

	end = sizeof(pad.buf) / 2;
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_strcpy(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf) + 1] = { };
	int i;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(src) - 2; i++)
		src[i] = i + '0';

	/* Destination is %NUL-filled to start. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strcpy() 1 less than max size. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Only last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	src[sizeof(src) - 2] = 'A';
	/* But now we trip the overflow checking. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Trailing %NUL -- thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	src[sizeof(src) - 1] = 'A';
	/* And for sure now, two bytes past. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	/*
	 * Which trips both the strlen() on the unterminated src,
	 * and the resulting copy attempt.
	 */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Trailing %NUL -- thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strncpy(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[] = "Copy me fully into a small piece of memory.";

	/* Destination is %NUL-filled to start. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strncpy() 1 less than max size. */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst - 1)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Only last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Legitimate (though unterminated) max-size strncpy. */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* No trailing %NUL -- thanks strncpy API. */
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Now verify that FORTIFY is working... */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst + 1)
				== pad.buf);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And further... */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst + 2)
				== pad.buf);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strscpy(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[] = "Copy me fully into a small piece of memory.";

	/* Destination is %NUL-filled to start. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strscpy() 1 less than max size. */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst - 1),
			-E2BIG);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Keeping space for %NUL, last two bytes should be %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Legitimate max-size strscpy. */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst),
			-E2BIG);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* A trailing %NUL will exist. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Now verify that FORTIFY is working... */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst + 1),
			-E2BIG);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And much further... */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(src) + unconst),
			-E2BIG);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strcat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf) / 2] = { };
	char one[] = "A";
	char two[] = "BC";
	int i;

	/* Fill 15 bytes with valid characters. */
	for (i = 0; i < sizeof(src) - 1; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strcat() using less than half max size. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strcat() now 2 bytes shy of the end. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strncat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf)] = { };
	int i, partial;

	/* Fill 15 bytes with valid characters. */
	partial = sizeof(src) / 2 - 1;
	for (i = 0; i < partial; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strncat() using less than half max size. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strncat() now 2 bytes shy of the end. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated destination, and overflow. */
	pad.buf[sizeof(pad.buf) - 1] = 'A';
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
	/* This will have tripped both strlen() and strncat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we should not go beyond the end of the buffer. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strlcat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf)] = { };
	int i, partial;
	int len = sizeof(pad.buf) + unconst;

	/* Fill 15 bytes with valid characters. */
	partial = sizeof(src) / 2 - 1;
	for (i = 0; i < partial; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strlcat() using less than half max size. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strlcat() now 2 bytes shy of the end. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated destination, and overflow. */
	pad.buf[sizeof(pad.buf) - 1] = 'A';
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
	/* This will have tripped both strlen() and strlcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we should not go beyond the end of the buffer. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated source, and overflow. */
	memset(src, 'B', sizeof(src));
	pad.buf[sizeof(pad.buf) - 1] = '\0';
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 2), len - 1 + sizeof(src));
	/* This will have tripped both strlen() and strlcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 4);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	/* But we should not go beyond the end of the buffer. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

/* Check for 0-sized arrays... */
struct fortify_zero_sized {
	unsigned long bytes_before;
	char buf[0];
	unsigned long bytes_after;
};

#define __fortify_test(memfunc)					\
static void fortify_test_##memfunc(struct kunit *test)		\
{								\
	struct fortify_zero_sized zero = { };			\
	struct fortify_padding pad = { };			\
	char srcA[sizeof(pad.buf) + 2];				\
	char srcB[sizeof(pad.buf) + 2];				\
	size_t len = sizeof(pad.buf) + unconst;			\
								\
	memset(srcA, 'A', sizeof(srcA));			\
	KUNIT_ASSERT_EQ(test, srcA[0], 'A');			\
	memset(srcB, 'B', sizeof(srcB));			\
	KUNIT_ASSERT_EQ(test, srcB[0], 'B');			\
								\
	memfunc(pad.buf, srcA, 0 + unconst);			\
	KUNIT_EXPECT_EQ(test, pad.buf[0], '\0');		\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf + 1, srcB, 1 + unconst);		\
	KUNIT_EXPECT_EQ(test, pad.buf[0], '\0');		\
	KUNIT_EXPECT_EQ(test, pad.buf[1], 'B');			\
	KUNIT_EXPECT_EQ(test, pad.buf[2], '\0');		\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf, srcA, 1 + unconst);			\
	KUNIT_EXPECT_EQ(test, pad.buf[0], 'A');			\
	KUNIT_EXPECT_EQ(test, pad.buf[1], 'B');			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf, srcA, len - 1);			\
	KUNIT_EXPECT_EQ(test, pad.buf[1], 'A');			\
	KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0');		\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf, srcA, len);				\
	KUNIT_EXPECT_EQ(test, pad.buf[1], 'A');			\
	KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A');		\
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);		\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(pad.buf, srcA, len + 1);			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);	\
	memfunc(pad.buf + 1, srcB, len);			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);	\
								\
	/* Reset error counter. */				\
	fortify_write_overflows = 0;				\
	/* Copy nothing into nothing: no errors. */		\
	memfunc(zero.buf, srcB, 0 + unconst);			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);	\
	memfunc(zero.buf, srcB, 1 + unconst);			\
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);	\
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);	\
}
__fortify_test(memcpy)
__fortify_test(memmove)

static void fortify_test_memscan(struct kunit *test)
{
	char haystack[] = "Where oh where is my memory range?";
	char *mem = haystack + strlen("Where oh where is ");
	char needle = 'm';
	size_t len = sizeof(haystack) + unconst;

	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
				  mem);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* Catch too-large range. */
	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memchr(struct kunit *test)
{
	char haystack[] = "Where oh where is my memory range?";
	char *mem = haystack + strlen("Where oh where is ");
	char needle = 'm';
	size_t len = sizeof(haystack) + unconst;

	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
				  mem);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* Catch too-large range. */
	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memchr_inv(struct kunit *test)
{
	char haystack[] = "Where oh where is my memory range?";
	char *mem = haystack + 1;
	char needle = 'W';
	size_t len = sizeof(haystack) + unconst;

	/* Normal search is okay. */
	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
				  mem);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* Catch too-large range. */
	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memcmp(struct kunit *test)
{
	char one[] = "My mind is going ...";
	char two[] = "My mind is going ... I can feel it.";
	size_t one_len = sizeof(one) + unconst - 1;
	size_t two_len = sizeof(two) + unconst - 1;

	/* We match the first string (ignoring the %NUL). */
	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* Still in bounds, but no longer matching. */
	KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Catch too-large ranges. */
	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);

	KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_kmemdup(struct kunit *test)
{
	char src[] = "I got Doom running on it!";
	char *copy;
	size_t len = sizeof(src) + unconst;

	/* Copy is within bounds. */
	copy = kmemdup(src, len, GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	kfree(copy);

	/* Without %NUL. */
	copy = kmemdup(src, len - 1, GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	kfree(copy);

	/* Tiny bounds. */
	copy = kmemdup(src, 1, GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	kfree(copy);

	/* Out of bounds by 1 byte. */
	copy = kmemdup(src, len + 1, GFP_KERNEL);
	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	kfree(copy);

	/* Way out of bounds. */
	copy = kmemdup(src, len * 2, GFP_KERNEL);
	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
	kfree(copy);

	/* Starting offset causing out of bounds. */
	copy = kmemdup(src + 1, len, GFP_KERNEL);
	KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
	kfree(copy);
}

static int fortify_test_init(struct kunit *test)
{
	if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
		kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");

	fortify_read_overflows = 0;
	kunit_add_named_resource(test, NULL, NULL, &read_resource,
				 "fortify_read_overflows",
				 &fortify_read_overflows);
	fortify_write_overflows = 0;
	kunit_add_named_resource(test, NULL, NULL, &write_resource,
				 "fortify_write_overflows",
				 &fortify_write_overflows);
	return 0;
}

static struct kunit_case fortify_test_cases[] = {
	KUNIT_CASE(fortify_test_known_sizes),
	KUNIT_CASE(fortify_test_control_flow_split),
	KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
	KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
	KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
	KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
	KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
	KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
	KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
	KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
	KUNIT_CASE(fortify_test_realloc_size),
	KUNIT_CASE(fortify_test_strlen),
	KUNIT_CASE(fortify_test_strnlen),
	KUNIT_CASE(fortify_test_strcpy),
	KUNIT_CASE(fortify_test_strncpy),
	KUNIT_CASE(fortify_test_strscpy),
	KUNIT_CASE(fortify_test_strcat),
	KUNIT_CASE(fortify_test_strncat),
	KUNIT_CASE(fortify_test_strlcat),
	/* skip memset: performs bounds checking covered elsewhere */
	KUNIT_CASE(fortify_test_memcpy),
	KUNIT_CASE(fortify_test_memmove),
	KUNIT_CASE(fortify_test_memscan),
	KUNIT_CASE(fortify_test_memchr),
	KUNIT_CASE(fortify_test_memchr_inv),
	KUNIT_CASE(fortify_test_memcmp),
	KUNIT_CASE(fortify_test_kmemdup),
	{}
};

static struct kunit_suite fortify_test_suite = {
	.name = "fortify",
	.init = fortify_test_init,
	.test_cases = fortify_test_cases,
};

kunit_test_suite(fortify_test_suite);

MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE");
MODULE_LICENSE("GPL");
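
/*
 * Example sketch (illustrative only, not part of the suite above): how a
 * hypothetical allocator could be wired into the same constant/dynamic
 * __alloc_size() coverage. The names TEST_my_alloc(), my_alloc(), and
 * my_free() are assumptions for illustration, not real kernel APIs; a
 * real addition would follow the TEST_kmalloc()/TEST_vmalloc() patterns,
 * exercise each allocation API through checker(), and register both
 * generated cases in fortify_test_cases[]:
 *
 *	#define TEST_my_alloc(checker, expected_size, alloc_size) do {	\
 *		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;			\
 *									\
 *		checker(expected_size, my_alloc(alloc_size, gfp),	\
 *			my_free(p));					\
 *	} while (0)
 *	DEFINE_ALLOC_SIZE_TEST_PAIR(my_alloc)
 *
 * This would expand into fortify_test_alloc_size_my_alloc_const() and
 * fortify_test_alloc_size_my_alloc_dynamic(), checking both
 * __builtin_object_size() and __builtin_dynamic_object_size() results.
 */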