// SPDX-License-Identifier: GPL-2.0
/*
 * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
 * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
 *
 * For corner cases with UBSAN, try testing with:
 *
 * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *	--kconfig_add CONFIG_FORTIFY_SOURCE=y \
 *	--kconfig_add CONFIG_UBSAN=y \
 *	--kconfig_add CONFIG_UBSAN_TRAP=y \
 *	--kconfig_add CONFIG_UBSAN_BOUNDS=y \
 *	--kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
 *	--make_options LLVM=1 fortify
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* We don't need to fill dmesg with the fortify WARNs during testing. */
#ifdef DEBUG
# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x)
#else
# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
# define FORTIFY_WARN_KUNIT(x...) do { } while (0)
#endif

/* Redefine fortify_panic() to track failures. */
void fortify_add_kunit_error(int write);
#define fortify_panic(func, write, avail, size, retfail) do { \
	FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \
	fortify_add_kunit_error(write); \
	return (retfail); \
} while (0)

/* Redefine fortify_warn_once() to track memcpy() failures. */
#define fortify_warn_once(chk_func, x...) do { \
	bool __result = chk_func; \
	FORTIFY_WARN_KUNIT(__result, x); \
	if (__result) \
		fortify_add_kunit_error(1); \
} while (0)
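/*
 * With fortify_panic() and fortify_warn_once() redirected as above, a
 * detected overflow no longer BUGs or WARNs; it only bumps a counter via
 * fortify_add_kunit_error(). A test can therefore provoke an overflow on
 * purpose and then assert on the counter. Minimal sketch (not part of the
 * upstream file; "small" is a made-up buffer and "unconst" is the volatile
 * zero declared later in this file):
 *
 *	char small[8];
 *	size_t len = sizeof(small) + unconst;
 *
 *	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
 *	strscpy(small, "definitely more than eight bytes", len + 1);
 *	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
 *
 * The real tests below follow exactly this pattern against struct
 * fortify_padding.
 */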
#include <kunit/device.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Handle being built without CONFIG_FORTIFY_SOURCE */
#ifndef __compiletime_strlen
# define __compiletime_strlen __builtin_strlen
#endif

static struct kunit_resource read_resource;
static struct kunit_resource write_resource;
static int fortify_read_overflows;
static int fortify_write_overflows;

static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change";

void fortify_add_kunit_error(int write)
{
	struct kunit_resource *resource;
	struct kunit *current_test;

	current_test = kunit_get_current_test();
	if (!current_test)
		return;

	resource = kunit_find_named_resource(current_test,
			write ? "fortify_write_overflows"
			      : "fortify_read_overflows");
	if (!resource)
		return;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
}
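/*
 * The named resources looked up above have to be registered and zeroed from
 * the suite's per-test init function (defined later in this file, beyond
 * this excerpt). A sketch of that registration, shown here only to make the
 * lookup easier to follow, not as the verbatim implementation:
 *
 *	static int fortify_test_init(struct kunit *test)
 *	{
 *		fortify_read_overflows = 0;
 *		kunit_add_named_resource(test, NULL, NULL, &read_resource,
 *					 "fortify_read_overflows",
 *					 &fortify_read_overflows);
 *		fortify_write_overflows = 0;
 *		kunit_add_named_resource(test, NULL, NULL, &write_resource,
 *					 "fortify_write_overflows",
 *					 &fortify_write_overflows);
 *		return 0;
 *	}
 */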
static void fortify_test_known_sizes(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);

	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
	/* Externally defined and dynamically sized string pointer: */
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}

/* This is volatile so the optimizer can't perform DCE below. */
static volatile int pick;

/* Not inline to keep optimizer from figuring out which string we want. */
static noinline size_t want_minus_one(int pick)
{
	const char *str;

	switch (pick) {
	case 1:
		str = "4444";
		break;
	case 2:
		str = "333";
		break;
	default:
		str = "1";
		break;
	}
	return __compiletime_strlen(str);
}

static void fortify_test_control_flow_split(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}

#define KUNIT_EXPECT_BOS(test, p, expected, name) \
	KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
		expected, \
		"__alloc_size() not working with __bos on " name "\n")

#if !__has_builtin(__builtin_dynamic_object_size)
#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
	/* Silence "unused variable 'expected'" warning. */ \
	KUNIT_EXPECT_EQ(test, expected, expected)
#else
#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
	KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
		expected, \
		"__alloc_size() not working with __bdos on " name "\n")
#endif

/* If the expected size is a constant value, __bos can see it. */
#define check_const(_expected, alloc, free) do { \
	size_t expected = (_expected); \
	void *p = alloc; \
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
	KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
	free; \
} while (0)

/* If the expected size is NOT a constant value, __bos CANNOT see it. */
#define check_dynamic(_expected, alloc, free) do { \
	size_t expected = (_expected); \
	void *p = alloc; \
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
	KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
	free; \
} while (0)
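/*
 * Why two checkers: __builtin_object_size() must fold to a compile-time
 * constant (falling back to SIZE_MAX when the size cannot be known at
 * compile time), while __builtin_dynamic_object_size() may evaluate the
 * __alloc_size() hint at runtime. Minimal sketch, not part of the upstream
 * file ("runtime_len" is a hypothetical non-constant size):
 *
 *	void *fixed = kmalloc(128, gfp);
 *	void *dyn = kmalloc(runtime_len, gfp);
 *
 *	__builtin_object_size(fixed, 1)		-> 128
 *	__builtin_dynamic_object_size(fixed, 1)	-> 128
 *	__builtin_object_size(dyn, 1)		-> SIZE_MAX (not knowable)
 *	__builtin_dynamic_object_size(dyn, 1)	-> runtime_len
 *
 * check_const() expects the first pattern, check_dynamic() the second.
 */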
/* Assortment of constant-value kinda-edge cases. */
#define CONST_TEST_BODY(TEST_alloc) do { \
	/* Special-case vmalloc()-family to skip 0-sized allocs. */ \
	if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
		TEST_alloc(check_const, 0, 0); \
	TEST_alloc(check_const, 1, 1); \
	TEST_alloc(check_const, 128, 128); \
	TEST_alloc(check_const, 1023, 1023); \
	TEST_alloc(check_const, 1025, 1025); \
	TEST_alloc(check_const, 4096, 4096); \
	TEST_alloc(check_const, 4097, 4097); \
} while (0)

static volatile size_t zero_size;
static volatile size_t unknown_size = 50;

#if !__has_builtin(__builtin_dynamic_object_size)
#define DYNAMIC_TEST_BODY(TEST_alloc) \
	kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
#else
#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
	size_t size = unknown_size; \
	\
	/* \
	 * Expected size is "size" in each test, before it is then \
	 * internally incremented in each test. Requires we disable \
	 * -Wunsequenced. \
	 */ \
	TEST_alloc(check_dynamic, size, size++); \
	/* Make sure incrementing actually happened. */ \
	KUNIT_EXPECT_NE(test, size, unknown_size); \
} while (0)
#endif

#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
{ \
	CONST_TEST_BODY(TEST_##allocator); \
} \
static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
{ \
	DYNAMIC_TEST_BODY(TEST_##allocator); \
}
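/*
 * For readers unfamiliar with the token pasting above: an invocation such as
 * DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc) (used right after TEST_kmalloc below)
 * expands to roughly this pair of KUnit cases -- sketch only:
 *
 *	static void fortify_test_alloc_size_kmalloc_const(struct kunit *test)
 *	{
 *		CONST_TEST_BODY(TEST_kmalloc);
 *	}
 *	static void fortify_test_alloc_size_kmalloc_dynamic(struct kunit *test)
 *	{
 *		DYNAMIC_TEST_BODY(TEST_kmalloc);
 *	}
 *
 * so every TEST_<allocator> macro gets one constant-size and one
 * runtime-size test.
 */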
#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
	void *orig; \
	size_t len; \
	\
	checker(expected_size, kmalloc(alloc_size, gfp), \
		kfree(p)); \
	checker(expected_size, \
		kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
		kfree(p)); \
	checker(expected_size, kzalloc(alloc_size, gfp), \
		kfree(p)); \
	checker(expected_size, \
		kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
		kfree(p)); \
	checker(expected_size, kcalloc(1, alloc_size, gfp), \
		kfree(p)); \
	checker(expected_size, kcalloc(alloc_size, 1, gfp), \
		kfree(p)); \
	checker(expected_size, \
		kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
		kfree(p)); \
	checker(expected_size, \
		kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
		kfree(p)); \
	checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
		kfree(p)); \
	checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
		kfree(p)); \
	checker(expected_size, \
		kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
		kfree(p)); \
	checker(expected_size, \
		kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
		kfree(p)); \
	\
	orig = kmalloc(alloc_size, gfp); \
	KUNIT_EXPECT_TRUE(test, orig != NULL); \
	checker((expected_size) * 2, \
		krealloc(orig, (alloc_size) * 2, gfp), \
		kfree(p)); \
	orig = kmalloc(alloc_size, gfp); \
	KUNIT_EXPECT_TRUE(test, orig != NULL); \
	checker((expected_size) * 2, \
		krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
		kfree(p)); \
	orig = kmalloc(alloc_size, gfp); \
	KUNIT_EXPECT_TRUE(test, orig != NULL); \
	checker((expected_size) * 2, \
		krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
		kfree(p)); \
	\
	len = 11; \
	/* Using memdup() with fixed size, so force unknown length. */ \
	if (!__builtin_constant_p(expected_size)) \
		len += zero_size; \
	checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)

/* Sizes are in pages, not bytes. */
#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
	checker((expected_pages) * PAGE_SIZE, \
		vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		__vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)

/* Sizes are in pages (and open-coded for side-effects), not bytes. */
#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
	size_t prev_size; \
	void *orig; \
	\
	checker((expected_pages) * PAGE_SIZE, \
		kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
		kvfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
		kvfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
		kvfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
		kvfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
		kvfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
		kvfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
		kvfree(p)); \
	checker((expected_pages) * PAGE_SIZE, \
		kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
		kvfree(p)); \
	\
	prev_size = (expected_pages) * PAGE_SIZE; \
	orig = kvmalloc(prev_size, gfp); \
	KUNIT_EXPECT_TRUE(test, orig != NULL); \
	checker(((expected_pages) * PAGE_SIZE) * 2, \
		kvrealloc(orig, ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
		kvfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)

#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
	const char dev_name[] = "fortify-test"; \
	struct device *dev; \
	void *orig; \
	size_t len; \
	\
	/* Create dummy device for devm_kmalloc()-family tests. */ \
	dev = kunit_device_register(test, dev_name); \
	KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
			       "Cannot register test device\n"); \
	\
	checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
		devm_kfree(dev, p)); \
	checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
		devm_kfree(dev, p)); \
	checker(expected_size, \
		devm_kmalloc_array(dev, 1, alloc_size, gfp), \
		devm_kfree(dev, p)); \
	checker(expected_size, \
		devm_kmalloc_array(dev, alloc_size, 1, gfp), \
		devm_kfree(dev, p)); \
	checker(expected_size, \
		devm_kcalloc(dev, 1, alloc_size, gfp), \
		devm_kfree(dev, p)); \
	checker(expected_size, \
		devm_kcalloc(dev, alloc_size, 1, gfp), \
		devm_kfree(dev, p)); \
	\
	orig = devm_kmalloc(dev, alloc_size, gfp); \
	KUNIT_EXPECT_TRUE(test, orig != NULL); \
	checker((expected_size) * 2, \
		devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
		devm_kfree(dev, p)); \
	\
	len = 4; \
	/* Using memdup() with fixed size, so force unknown length. */ \
	if (!__builtin_constant_p(expected_size)) \
		len += zero_size; \
	checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
		devm_kfree(dev, p)); \
	\
	kunit_device_unregister(test, dev); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)

static const char * const test_strs[] = {
	"",
	"Hello there",
	"A longer string, just for variety",
};

#define TEST_realloc(checker) do { \
	gfp_t gfp = GFP_KERNEL; \
	size_t len; \
	int i; \
	\
	for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
		len = strlen(test_strs[i]); \
		KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
		checker(len, kmemdup_array(test_strs[i], len, 1, gfp), \
			kfree(p)); \
		checker(len, kmemdup(test_strs[i], len, gfp), \
			kfree(p)); \
	} \
} while (0)
static void fortify_test_realloc_size(struct kunit *test)
{
	TEST_realloc(check_dynamic);
}

/*
 * We can't have an array at the end of a structure or else
 * builds without -fstrict-flex-arrays=3 will report them as
 * being an unknown length. Additionally, add bytes before
 * and after the string to catch over/underflows if tests
 * fail.
 */
struct fortify_padding {
	unsigned long bytes_before;
	char buf[32];
	unsigned long bytes_after;
};
/* Force compiler into not being able to resolve size at compile-time. */
static volatile int unconst;
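/*
 * The "unconst" trick is what lets the string tests below reach the
 * *runtime* FORTIFY checks: adding a volatile zero leaves a size numerically
 * unchanged but stops the compiler from treating it as a constant, so an
 * overflowing call becomes a runtime report (counted by the globals above)
 * instead of a build error. Minimal sketch, not part of the upstream file:
 *
 *	struct fortify_padding pad = { };
 *
 *	strncpy(pad.buf, src, sizeof(pad.buf) + 1);		<- compile-time error
 *	strncpy(pad.buf, src, sizeof(pad.buf) + unconst + 1);	<- runtime check;
 *							bumps fortify_write_overflows
 */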
static void fortify_test_strlen(struct kunit *test)
{
	struct fortify_padding pad = { };
	int i, end = sizeof(pad.buf) - 1;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(pad.buf) - 1; i++)
		pad.buf[i] = i + ' ';
	/* Trailing bytes are still %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* String is terminated, so strlen() is valid. */
	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Make string unterminated, and recount. */
	pad.buf[end] = 'A';
	end = sizeof(pad.buf);
	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
}

static void fortify_test_strnlen(struct kunit *test)
{
	struct fortify_padding pad = { };
	int i, end = sizeof(pad.buf) - 1;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(pad.buf) - 1; i++)
		pad.buf[i] = i + ' ';
	/* Trailing bytes are still %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* String is terminated, so strnlen() is valid. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* A truncated strnlen() will be safe, too. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
					sizeof(pad.buf) / 2);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Make string unterminated, and recount. */
	pad.buf[end] = 'A';
	end = sizeof(pad.buf);
	/* Reading beyond with strnlen() will fail. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

	/* Early-truncated is safe still, though. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

	end = sizeof(pad.buf) / 2;
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_strcpy(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf) + 1] = { };
	int i;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(src) - 2; i++)
		src[i] = i + ' ';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strcpy(), 1 less than max size. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Only last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	src[sizeof(src) - 2] = 'A';
	/* But now we trip the overflow checking. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Trailing %NUL -- thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	src[sizeof(src) - 1] = 'A';
	/* And for sure now, two bytes past. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	/*
	 * Which trips both the strlen() on the unterminated src,
	 * and the resulting copy attempt.
	 */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Trailing %NUL -- thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strncpy(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[] = "Copy me fully into a small buffer and I will overflow!";

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strncpy(), 1 less than max size. */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst - 1)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Only last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Legitimate (though unterminated) max-size strncpy. */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* No trailing %NUL -- thanks strncpy API. */
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	/* But we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Now verify that FORTIFY is working... */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst + 1)
				== pad.buf);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And further... */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst + 2)
				== pad.buf);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strscpy(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[] = "Copy me fully into a small buffer and I will overflow!";

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strscpy(), 1 less than max size. */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst - 1),
			-E2BIG);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Keeping space for %NUL, last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Legitimate max-size strscpy. */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst),
			-E2BIG);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* A trailing %NUL will exist. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');

	/* Now verify that FORTIFY is working... */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst + 1),
			-E2BIG);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And much further... */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(src) * 2 + unconst),
			-E2BIG);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
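/*
 * A note on the return-value conventions the assertions above and below
 * encode: strscpy() reports overflow by returning -E2BIG while always
 * terminating within the given size; strlcat() returns the total length it
 * tried to create; strcat(), strncat() and strncpy() simply return the
 * destination pointer. Rough sketch:
 *
 *	n = strscpy(buf, src, len);	n == -E2BIG when src did not fit
 *	n = strlcat(buf, src, len);	n == (attempted) strlen(buf) + strlen(src)
 */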
static void fortify_test_strcat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf) / 2] = { };
	char one[] = "A";
	char two[] = "BC";
	int i;

	/* Fill 15 bytes with valid characters. */
	for (i = 0; i < sizeof(src) - 1; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strcat() using less than half max size. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strcat() now 2 bytes shy of end. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strncat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf)] = { };
	int i, partial;

	/* Fill 15 bytes with valid characters. */
	partial = sizeof(src) / 2 - 1;
	for (i = 0; i < partial; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strncat() using less than half max size. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strncat() now 2 bytes shy of end. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated destination, and overflow. */
	pad.buf[sizeof(pad.buf) - 1] = 'A';
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	/* This will have tripped both strlen() and strcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void fortify_test_strlcat(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf)] = { };
	int i, partial;
	int len = sizeof(pad.buf) + unconst;

	/* Fill 15 bytes with valid characters. */
	partial = sizeof(src) / 2 - 1;
	for (i = 0; i < partial; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strlcat() using less than half max size. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strlcat() now 2 bytes shy of end. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated destination, and overflow. */
	pad.buf[sizeof(pad.buf) - 1] = 'A';
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
	/* This will have tripped both strlen() and strlcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated source, and overflow. */
	memset(src, 'B', sizeof(src));
	pad.buf[sizeof(pad.buf) - 1] = '\0';
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
	/* This will have tripped both strlen() and strlcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
*/ 843 KUNIT_EXPECT_EQ(test, fortify_read_ove 814 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3); 844 KUNIT_EXPECT_EQ(test, fortify_write_ov 815 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3); 845 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(p 816 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0'); 846 /* But we should not go beyond the end 817 /* But we should not go beyond the end. */ 847 KUNIT_EXPECT_EQ(test, pad.bytes_after, 818 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); 848 } 819 } 849 820 850 /* Check for 0-sized arrays... */ !! 821 static void memscan_test(struct kunit *test) 851 struct fortify_zero_sized { << 852 unsigned long bytes_before; << 853 char buf[0]; << 854 unsigned long bytes_after; << 855 }; << 856 << 857 #define __fortify_test(memfunc) << 858 static void fortify_test_##memfunc(struct kuni << 859 { << 860 struct fortify_zero_sized zero = { }; << 861 struct fortify_padding pad = { }; << 862 char srcA[sizeof(pad.buf) + 2]; << 863 char srcB[sizeof(pad.buf) + 2]; << 864 size_t len = sizeof(pad.buf) + unconst << 865 << 866 memset(srcA, 'A', sizeof(srcA)); << 867 KUNIT_ASSERT_EQ(test, srcA[0], 'A'); << 868 memset(srcB, 'B', sizeof(srcB)); << 869 KUNIT_ASSERT_EQ(test, srcB[0], 'B'); << 870 << 871 memfunc(pad.buf, srcA, 0 + unconst); << 872 KUNIT_EXPECT_EQ(test, pad.buf[0], '\0' << 873 KUNIT_EXPECT_EQ(test, fortify_read_ove << 874 KUNIT_EXPECT_EQ(test, fortify_write_ov << 875 memfunc(pad.buf + 1, srcB, 1 + unconst << 876 KUNIT_EXPECT_EQ(test, pad.buf[0], '\0' << 877 KUNIT_EXPECT_EQ(test, pad.buf[1], 'B') << 878 KUNIT_EXPECT_EQ(test, pad.buf[2], '\0' << 879 KUNIT_EXPECT_EQ(test, fortify_read_ove << 880 KUNIT_EXPECT_EQ(test, fortify_write_ov << 881 memfunc(pad.buf, srcA, 1 + unconst); << 882 KUNIT_EXPECT_EQ(test, pad.buf[0], 'A') << 883 KUNIT_EXPECT_EQ(test, pad.buf[1], 'B') << 884 KUNIT_EXPECT_EQ(test, fortify_read_ove << 885 KUNIT_EXPECT_EQ(test, fortify_write_ov << 886 memfunc(pad.buf, srcA, len - 1); << 887 KUNIT_EXPECT_EQ(test, pad.buf[1], 'A') << 888 KUNIT_EXPECT_EQ(test, pad.buf[len - 1] << 889 KUNIT_EXPECT_EQ(test, fortify_read_ove << 890 KUNIT_EXPECT_EQ(test, fortify_write_ov << 891 memfunc(pad.buf, srcA, len); << 892 KUNIT_EXPECT_EQ(test, pad.buf[1], 'A') << 893 KUNIT_EXPECT_EQ(test, pad.buf[len - 1] << 894 KUNIT_EXPECT_EQ(test, pad.bytes_after, << 895 KUNIT_EXPECT_EQ(test, fortify_read_ove << 896 KUNIT_EXPECT_EQ(test, fortify_write_ov << 897 memfunc(pad.buf, srcA, len + 1); << 898 KUNIT_EXPECT_EQ(test, fortify_read_ove << 899 KUNIT_EXPECT_EQ(test, fortify_write_ov << 900 memfunc(pad.buf + 1, srcB, len); << 901 KUNIT_EXPECT_EQ(test, fortify_read_ove << 902 KUNIT_EXPECT_EQ(test, fortify_write_ov << 903 << 904 /* Reset error counter. 
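
/*
 * Side note, not exercised by the test above: strlcat() reports the total
 * length it tried to create, so callers can detect the truncation that the
 * overflow cases above rely on with a check roughly like the following
 * (the "dst" and "src" names here are illustrative only):
 *
 *	if (strlcat(dst, src, sizeof(dst)) >= sizeof(dst))
 *		pr_warn("concatenation was truncated\n");
 */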

/* Check for 0-sized arrays... */
struct fortify_zero_sized {
        unsigned long bytes_before;
        char buf[0];
        unsigned long bytes_after;
};

#define __fortify_test(memfunc)                                 \
static void fortify_test_##memfunc(struct kunit *test)         \
{                                                               \
        struct fortify_zero_sized zero = { };                   \
        struct fortify_padding pad = { };                       \
        char srcA[sizeof(pad.buf) + 2];                         \
        char srcB[sizeof(pad.buf) + 2];                         \
        size_t len = sizeof(pad.buf) + unconst;                 \
                                                                \
        memset(srcA, 'A', sizeof(srcA));                        \
        KUNIT_ASSERT_EQ(test, srcA[0], 'A');                    \
        memset(srcB, 'B', sizeof(srcB));                        \
        KUNIT_ASSERT_EQ(test, srcB[0], 'B');                    \
                                                                \
        memfunc(pad.buf, srcA, 0 + unconst);                    \
        KUNIT_EXPECT_EQ(test, pad.buf[0], '\0');                \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);      \
        memfunc(pad.buf + 1, srcB, 1 + unconst);                \
        KUNIT_EXPECT_EQ(test, pad.buf[0], '\0');                \
        KUNIT_EXPECT_EQ(test, pad.buf[1], 'B');                 \
        KUNIT_EXPECT_EQ(test, pad.buf[2], '\0');                \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);      \
        memfunc(pad.buf, srcA, 1 + unconst);                    \
        KUNIT_EXPECT_EQ(test, pad.buf[0], 'A');                 \
        KUNIT_EXPECT_EQ(test, pad.buf[1], 'B');                 \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);      \
        memfunc(pad.buf, srcA, len - 1);                        \
        KUNIT_EXPECT_EQ(test, pad.buf[1], 'A');                 \
        KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0');          \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);      \
        memfunc(pad.buf, srcA, len);                            \
        KUNIT_EXPECT_EQ(test, pad.buf[1], 'A');                 \
        KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A');           \
        KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);              \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);      \
        memfunc(pad.buf, srcA, len + 1);                        \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);      \
        memfunc(pad.buf + 1, srcB, len);                        \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);      \
                                                                \
        /* Reset error counter. */                              \
        fortify_write_overflows = 0;                            \
        /* Copy nothing into nothing: no errors. */             \
        memfunc(zero.buf, srcB, 0 + unconst);                   \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);      \
        memfunc(zero.buf, srcB, 1 + unconst);                   \
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);       \
        KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);      \
}
__fortify_test(memcpy)
__fortify_test(memmove)
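
/*
 * The macro above stamps out fortify_test_memcpy() and
 * fortify_test_memmove(). Any other fortified routine with the same
 * (dest, src, count) calling shape could be covered the same way; a
 * hypothetical fortified helper named "memdupcpy" would only need:
 *
 *	__fortify_test(memdupcpy)
 *
 * plus a matching KUNIT_CASE(fortify_test_memdupcpy) entry in the case
 * list at the bottom of this file.
 */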

static void fortify_test_memscan(struct kunit *test)
{
        char haystack[] = "Where oh where is my memory range?";
        char *mem = haystack + strlen("Where oh where is ");
        char needle = 'm';
        size_t len = sizeof(haystack) + unconst;

        KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
                                  mem);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* Catch too-large range. */
        KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
                                  NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
                                  NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memchr(struct kunit *test)
{
        char haystack[] = "Where oh where is my memory range?";
        char *mem = haystack + strlen("Where oh where is ");
        char needle = 'm';
        size_t len = sizeof(haystack) + unconst;

        KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
                                  mem);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* Catch too-large range. */
        KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
                                  NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
                                  NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memchr_inv(struct kunit *test)
{
        char haystack[] = "Where oh where is my memory range?";
        char *mem = haystack + 1;
        char needle = 'W';
        size_t len = sizeof(haystack) + unconst;

        /* Normal search is okay. */
        KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
                                  mem);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* Catch too-large range. */
        KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
                                  NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
                                  NULL);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_memcmp(struct kunit *test)
{
        char one[] = "My mind is going ...";
        char two[] = "My mind is going ... I can feel it.";
        size_t one_len = sizeof(one) + unconst - 1;
        size_t two_len = sizeof(two) + unconst - 1;

        /* We match the first string (ignoring the %NUL). */
        KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        /* Still in bounds, but no longer matching. */
        KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

        /* Catch too-large ranges. */
        KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);

        KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void fortify_test_kmemdup(struct kunit *test)
{
        char src[] = "I got Doom running on it!";
        char *copy;
        size_t len = sizeof(src) + unconst;

        /* Copy is within bounds. */
        copy = kmemdup(src, len, GFP_KERNEL);
        KUNIT_EXPECT_NOT_NULL(test, copy);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        kfree(copy);

        /* Without %NUL. */
        copy = kmemdup(src, len - 1, GFP_KERNEL);
        KUNIT_EXPECT_NOT_NULL(test, copy);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        kfree(copy);

        /* Tiny bounds. */
        copy = kmemdup(src, 1, GFP_KERNEL);
        KUNIT_EXPECT_NOT_NULL(test, copy);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
        kfree(copy);

        /* Out of bounds by 1 byte. */
        copy = kmemdup(src, len + 1, GFP_KERNEL);
        KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
        kfree(copy);

        /* Way out of bounds. */
        copy = kmemdup(src, len * 2, GFP_KERNEL);
        KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
        kfree(copy);

        /* Starting offset causing out of bounds. */
        copy = kmemdup(src + 1, len, GFP_KERNEL);
        KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
        KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
        kfree(copy);
}

static int fortify_test_init(struct kunit *test)
{
        if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
                kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");

        fortify_read_overflows = 0;
        kunit_add_named_resource(test, NULL, NULL, &read_resource,
                                 "fortify_read_overflows",
                                 &fortify_read_overflows);
        fortify_write_overflows = 0;
        kunit_add_named_resource(test, NULL, NULL, &write_resource,
                                 "fortify_write_overflows",
                                 &fortify_write_overflows);
        return 0;
}
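
/*
 * Reminder when extending this file: a new fortify_test_*() function only
 * runs if it is also listed in fortify_test_cases[] below, i.e. a
 * hypothetical addition would pair its definition with an entry like:
 *
 *	KUNIT_CASE(fortify_test_new_helper),
 */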

static struct kunit_case fortify_test_cases[] = {
        KUNIT_CASE(fortify_test_known_sizes),
        KUNIT_CASE(fortify_test_control_flow_split),
        KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
        KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
        KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
        KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
        KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
        KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
        KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
        KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
        KUNIT_CASE(fortify_test_realloc_size),
        KUNIT_CASE(fortify_test_strlen),
        KUNIT_CASE(fortify_test_strnlen),
        KUNIT_CASE(fortify_test_strcpy),
        KUNIT_CASE(fortify_test_strncpy),
        KUNIT_CASE(fortify_test_strscpy),
        KUNIT_CASE(fortify_test_strcat),
        KUNIT_CASE(fortify_test_strncat),
        KUNIT_CASE(fortify_test_strlcat),
        /* skip memset: performs bounds checking on whole structs */
        KUNIT_CASE(fortify_test_memcpy),
        KUNIT_CASE(fortify_test_memmove),
        KUNIT_CASE(fortify_test_memscan),
        KUNIT_CASE(fortify_test_memchr),
        KUNIT_CASE(fortify_test_memchr_inv),
        KUNIT_CASE(fortify_test_memcmp),
        KUNIT_CASE(fortify_test_kmemdup),
        {}
};

static struct kunit_suite fortify_test_suite = {
        .name = "fortify",
        .init = fortify_test_init,
        .test_cases = fortify_test_cases,
};

kunit_test_suite(fortify_test_suite);

MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE");
MODULE_LICENSE("GPL");