// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel module for testing copy_to/from_user infrastructure.
 *
 * Copyright 2013 Google Inc. All Rights Reserved
 *
 * Authors:
 *      Kees Cook       <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mman.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <kunit/test.h>

/*
 * Several 32-bit architectures support 64-bit {get,put}_user() calls.
 * As there doesn't appear to be anything that can safely determine
 * their capability at compile-time, we just have to opt-out certain archs.
 */
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
			    !defined(CONFIG_M68K) &&		\
			    !defined(CONFIG_MICROBLAZE) &&	\
			    !defined(CONFIG_NIOS2) &&		\
			    !defined(CONFIG_PPC32) &&		\
			    !defined(CONFIG_SUPERH))
# define TEST_U64
#endif

struct usercopy_test_priv {
	char *kmem;
	char __user *umem;
	size_t size;
};

static bool is_zeroed(void *from, size_t size)
{
	return memchr_inv(from, 0x0, size) == NULL;
}

/* Test usage of check_nonzero_user(). */
static void usercopy_test_check_nonzero_user(struct kunit *test)
{
	size_t start, end, i, zero_start, zero_end;
	struct usercopy_test_priv *priv = test->priv;
	char __user *umem = priv->umem;
	char *kmem = priv->kmem;
	size_t size = priv->size;

	KUNIT_ASSERT_GE_MSG(test, size, 2 * PAGE_SIZE, "buffer too small");

	/*
	 * We want to cross a page boundary to exercise the code more
	 * effectively. We also don't want to make the size we scan too large,
	 * otherwise the test can take a long time and cause soft lockups. So
	 * scan a 1024 byte region across the page boundary.
	 */
	size = 1024;
	start = PAGE_SIZE - (size / 2);

	kmem += start;
	umem += start;

	zero_start = size / 4;
	zero_end = size - zero_start;

	/*
	 * We conduct a series of check_nonzero_user() tests on a block of
	 * memory with the following byte-pattern (trying every possible
	 * [start,end] pair):
	 *
	 *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
	 *
	 * And we verify that check_nonzero_user() acts identically to
	 * memchr_inv().
	 */

	memset(kmem, 0x0, size);
	for (i = 1; i < zero_start; i += 2)
		kmem[i] = 0xff;
	for (i = zero_end; i < size; i += 2)
		kmem[i] = 0xff;

	KUNIT_EXPECT_EQ_MSG(test, copy_to_user(umem, kmem, size), 0,
			    "legitimate copy_to_user failed");

	for (start = 0; start <= size; start++) {
		for (end = start; end <= size; end++) {
			size_t len = end - start;
			int retval = check_zeroed_user(umem + start, len);
			int expected = is_zeroed(kmem + start, len);

			KUNIT_ASSERT_EQ_MSG(test, retval, expected,
					    "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
					    retval, expected, start, end);
		}
	}
}
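
/*
 * Note: check_zeroed_user() returns 1 when the userspace region contains
 * only zero bytes, 0 when a non-zero byte is found, and a negative errno
 * on fault, which is why the loop above can compare its result directly
 * against the memchr_inv()-based is_zeroed().
 *
 * A minimal sketch of how a caller typically uses it to validate the
 * unknown tail of an extensible userspace struct (the struct name and
 * variables here are hypothetical, not part of this test):
 *
 *	if (usize > sizeof(struct foo_args)) {
 *		int ret = check_zeroed_user(uarg + sizeof(struct foo_args),
 *					    usize - sizeof(struct foo_args));
 *		if (ret < 0)
 *			return ret;
 *		if (ret == 0)
 *			return -E2BIG;
 *	}
 */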

/* Test usage of copy_struct_from_user(). */
static void usercopy_test_copy_struct_from_user(struct kunit *test)
{
	char *umem_src = NULL, *expected = NULL;
	struct usercopy_test_priv *priv = test->priv;
	char __user *umem = priv->umem;
	char *kmem = priv->kmem;
	size_t size = priv->size;
	size_t ksize, usize;

	umem_src = kunit_kmalloc(test, size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, umem_src);

	expected = kunit_kmalloc(test, size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected);

	/* Fill umem with a fixed byte pattern. */
	memset(umem_src, 0x3e, size);
	KUNIT_ASSERT_EQ_MSG(test, copy_to_user(umem, umem_src, size), 0,
			    "legitimate copy_to_user failed");

	/* Check basic case -- (usize == ksize). */
	ksize = size;
	usize = size;

	memcpy(expected, umem_src, ksize);

	memset(kmem, 0x0, size);
	KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
			    "copy_struct_from_user(usize == ksize) failed");
	KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
			       "copy_struct_from_user(usize == ksize) gives unexpected copy");

	/* Old userspace case -- (usize < ksize). */
	ksize = size;
	usize = size / 2;

	memcpy(expected, umem_src, usize);
	memset(expected + usize, 0x0, ksize - usize);

	memset(kmem, 0x0, size);
	KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
			    "copy_struct_from_user(usize < ksize) failed");
	KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
			       "copy_struct_from_user(usize < ksize) gives unexpected copy");

	/* New userspace (-E2BIG) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	memset(kmem, 0x0, size);
	KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), -E2BIG,
			    "copy_struct_from_user(usize > ksize) didn't give E2BIG");

	/* New userspace (success) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	memcpy(expected, umem_src, ksize);
	KUNIT_EXPECT_EQ_MSG(test, clear_user(umem + ksize, usize - ksize), 0,
			    "legitimate clear_user failed");

	memset(kmem, 0x0, size);
	KUNIT_EXPECT_EQ_MSG(test, copy_struct_from_user(kmem, ksize, umem, usize), 0,
			    "copy_struct_from_user(usize > ksize) failed");
	KUNIT_EXPECT_MEMEQ_MSG(test, kmem, expected, ksize,
			       "copy_struct_from_user(usize > ksize) gives unexpected copy");
}
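
/*
 * The cases above mirror how copy_struct_from_user() is meant to behave
 * for extensible structs: it copies min(ksize, usize) bytes, zero-fills
 * the remainder of the kernel struct when userspace passed a shorter
 * (older) layout, and returns -E2BIG only when userspace passed a larger
 * struct whose trailing bytes are not all zero.
 *
 * A rough caller sketch, assuming a hypothetical extensible
 * "struct foo_args" syscall argument (names are illustrative only):
 *
 *	struct foo_args args;
 *	int err;
 *
 *	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
 *	if (err)
 *		return err;
 *	// From here on, fields unknown to userspace are guaranteed zero.
 */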

/*
 * Legitimate usage: none of these copies should fail.
 */
static void usercopy_test_valid(struct kunit *test)
{
	struct usercopy_test_priv *priv = test->priv;
	char __user *usermem = priv->umem;
	char *kmem = priv->kmem;

	memset(kmem, 0x3a, PAGE_SIZE * 2);
	KUNIT_EXPECT_EQ_MSG(test, 0, copy_to_user(usermem, kmem, PAGE_SIZE),
			    "legitimate copy_to_user failed");
	memset(kmem, 0x0, PAGE_SIZE);
	KUNIT_EXPECT_EQ_MSG(test, 0, copy_from_user(kmem, usermem, PAGE_SIZE),
			    "legitimate copy_from_user failed");
	KUNIT_EXPECT_MEMEQ_MSG(test, kmem, kmem + PAGE_SIZE, PAGE_SIZE,
			       "legitimate usercopy failed to copy data");

#define test_legit(size, check)						\
	do {								\
		size val_##size = (check);				\
		KUNIT_EXPECT_EQ_MSG(test, 0,				\
			put_user(val_##size, (size __user *)usermem),	\
			"legitimate put_user (" #size ") failed");	\
		val_##size = 0;						\
		KUNIT_EXPECT_EQ_MSG(test, 0,				\
			get_user(val_##size, (size __user *)usermem),	\
			"legitimate get_user (" #size ") failed");	\
		KUNIT_EXPECT_EQ_MSG(test, val_##size, check,		\
			"legitimate get_user (" #size ") failed to do copy"); \
	} while (0)

	test_legit(u8,  0x5a);
	test_legit(u16, 0x5a5b);
	test_legit(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_legit(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_legit
}

/*
 * Invalid usage: none of these copies should succeed.
 */
static void usercopy_test_invalid(struct kunit *test)
{
	struct usercopy_test_priv *priv = test->priv;
	char __user *usermem = priv->umem;
	char *bad_usermem = (char *)usermem;
	char *kmem = priv->kmem;
	u64 *kmem_u64 = (u64 *)kmem;

	if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
	    !IS_ENABLED(CONFIG_MMU)) {
		kunit_skip(test, "Testing for kernel/userspace address confusion is only sensible on architectures with a shared address space");
		return;
	}

	/* Prepare kernel memory with check values. */
	memset(kmem, 0x5a, PAGE_SIZE);
	memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);

	/* Reject kernel-to-kernel copies through copy_from_user(). */
	KUNIT_EXPECT_NE_MSG(test, copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
						 PAGE_SIZE), 0,
			    "illegal all-kernel copy_from_user passed");

	/* Destination half of buffer should have been zeroed. */
	KUNIT_EXPECT_MEMEQ_MSG(test, kmem + PAGE_SIZE, kmem, PAGE_SIZE,
			       "zeroing failure for illegal all-kernel copy_from_user");

#if 0
	/*
	 * When running with SMAP/PAN/etc, this will Oops the kernel
	 * due to the zeroing of userspace memory on failure. This needs
	 * to be tested in LKDTM instead, since this test module does not
	 * expect to explode.
	 */
	KUNIT_EXPECT_NE_MSG(test, copy_from_user(bad_usermem, (char __user *)kmem,
						 PAGE_SIZE), 0,
			    "illegal reversed copy_from_user passed");
#endif
	KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
					       PAGE_SIZE), 0,
			    "illegal all-kernel copy_to_user passed");

	KUNIT_EXPECT_NE_MSG(test, copy_to_user((char __user *)kmem, bad_usermem,
					       PAGE_SIZE), 0,
			    "illegal reversed copy_to_user passed");

#define test_illegal(size, check)					    \
	do {								    \
		size val_##size = (check);				    \
		/* get_user() */					    \
		KUNIT_EXPECT_NE_MSG(test, get_user(val_##size, (size __user *)kmem), 0, \
				    "illegal get_user (" #size ") passed"); \
		KUNIT_EXPECT_EQ_MSG(test, val_##size, 0,		    \
				    "zeroing failure for illegal get_user (" #size ")"); \
		/* put_user() */					    \
		*kmem_u64 = 0xF09FA4AFF09FA4AF;				    \
		KUNIT_EXPECT_NE_MSG(test, put_user(val_##size, (size __user *)kmem), 0, \
				    "illegal put_user (" #size ") passed"); \
		KUNIT_EXPECT_EQ_MSG(test, *kmem_u64, 0xF09FA4AFF09FA4AF,   \
				    "illegal put_user (" #size ") wrote to kernel memory!"); \
	} while (0)

	test_illegal(u8,  0x5a);
	test_illegal(u16, 0x5a5b);
	test_illegal(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_illegal
}
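
/*
 * Per-test setup: allocate a two-page kernel buffer and map a matching
 * two-page anonymous userspace region via kunit_vm_mmap(), so each test
 * gets one legitimate kernel pointer and one legitimate __user pointer
 * to copy between. Both are KUnit-managed and cleaned up automatically
 * when the test exits.
 */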

static int usercopy_test_init(struct kunit *test)
{
	struct usercopy_test_priv *priv;
	unsigned long user_addr;

	if (!IS_ENABLED(CONFIG_MMU)) {
		kunit_skip(test, "Userspace allocation testing not available on non-MMU systems");
		return 0;
	}

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
	test->priv = priv;
	priv->size = PAGE_SIZE * 2;

	priv->kmem = kunit_kmalloc(test, priv->size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->kmem);

	user_addr = kunit_vm_mmap(test, NULL, 0, priv->size,
				  PROT_READ | PROT_WRITE | PROT_EXEC,
				  MAP_ANONYMOUS | MAP_PRIVATE, 0);
	KUNIT_ASSERT_NE_MSG(test, user_addr, 0,
			    "Could not create userspace mm");
	KUNIT_ASSERT_LT_MSG(test, user_addr, (unsigned long)TASK_SIZE,
			    "Failed to allocate user memory");
	priv->umem = (char __user *)user_addr;

	return 0;
}

static struct kunit_case usercopy_test_cases[] = {
	KUNIT_CASE(usercopy_test_valid),
	KUNIT_CASE(usercopy_test_invalid),
	KUNIT_CASE(usercopy_test_check_nonzero_user),
	KUNIT_CASE(usercopy_test_copy_struct_from_user),
	{}
};

static struct kunit_suite usercopy_test_suite = {
	.name = "usercopy",
	.init = usercopy_test_init,
	.test_cases = usercopy_test_cases,
};

kunit_test_suites(&usercopy_test_suite);
MODULE_AUTHOR("Kees Cook <kees@kernel.org>");
MODULE_DESCRIPTION("Kernel module for testing copy_to/from_user infrastructure");
MODULE_LICENSE("GPL");