// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * test_kprobes.c - simple sanity test for k*probes
 *
 * Copyright IBM Corp. 2008
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <kunit/test.h>

#define div_factor 3

static u32 rand1, preh_val, posth_val;
static u32 (*target)(u32 value);
static u32 (*recursed_target)(u32 value);
static u32 (*target2)(u32 value);
static struct kunit *current_test;

static unsigned long (*internal_target)(void);
static unsigned long (*stacktrace_target)(void);
static unsigned long (*stacktrace_driver)(void);
static unsigned long target_return_address[2];

static noinline u32 kprobe_target(u32 value)
{
        return (value / div_factor);
}

static noinline u32 kprobe_recursed_target(u32 value)
{
        return (value / div_factor);
}

static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        KUNIT_EXPECT_FALSE(current_test, preemptible());

        preh_val = recursed_target(rand1);
        return 0;
}

static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
                            unsigned long flags)
{
        u32 expval = recursed_target(rand1);

        KUNIT_EXPECT_FALSE(current_test, preemptible());
        KUNIT_EXPECT_EQ(current_test, preh_val, expval);

        posth_val = preh_val + div_factor;
}

static struct kprobe kp = {
        .symbol_name = "kprobe_target",
        .pre_handler = kp_pre_handler,
        .post_handler = kp_post_handler
};

static void test_kprobe(struct kunit *test)
{
        current_test = test;
        KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
        target(rand1);
        unregister_kprobe(&kp);
        KUNIT_EXPECT_NE(test, 0, preh_val);
        KUNIT_EXPECT_NE(test, 0, posth_val);
}

static noinline u32 kprobe_target2(u32 value)
{
        return (value / div_factor) + 1;
}

static noinline unsigned long kprobe_stacktrace_internal_target(void)
{
        if (!target_return_address[0])
                target_return_address[0] = (unsigned long)__builtin_return_address(0);
        return target_return_address[0];
}

static noinline unsigned long kprobe_stacktrace_target(void)
{
        if (!target_return_address[1])
                target_return_address[1] = (unsigned long)__builtin_return_address(0);

        if (internal_target)
                internal_target();

        return target_return_address[1];
}

static noinline unsigned long kprobe_stacktrace_driver(void)
{
        if (stacktrace_target)
                stacktrace_target();

        /* This is for preventing inlining the above functions */
        return (unsigned long)__builtin_return_address(0);
}

static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
{
        preh_val = (rand1 / div_factor) + 1;
        return 0;
}

static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
                             unsigned long flags)
{
        KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
        posth_val = preh_val + div_factor;
}

static struct kprobe kp2 = {
        .symbol_name = "kprobe_target2",
        .pre_handler = kp_pre_handler2,
        .post_handler = kp_post_handler2
};

static void test_kprobes(struct kunit *test)
{
        struct kprobe *kps[2] = {&kp, &kp2};

        current_test = test;

        /* addr and flags should be cleared for reusing kprobe. */
        kp.addr = NULL;
        kp.flags = 0;

        KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
        preh_val = 0;
        posth_val = 0;
        target(rand1);

        KUNIT_EXPECT_NE(test, 0, preh_val);
        KUNIT_EXPECT_NE(test, 0, posth_val);

        preh_val = 0;
        posth_val = 0;
        target2(rand1);

        KUNIT_EXPECT_NE(test, 0, preh_val);
        KUNIT_EXPECT_NE(test, 0, posth_val);
        unregister_kprobes(kps, 2);
}

static struct kprobe kp_missed = {
        .symbol_name = "kprobe_recursed_target",
        .pre_handler = kp_pre_handler,
        .post_handler = kp_post_handler,
};

static void test_kprobe_missed(struct kunit *test)
{
        current_test = test;
        preh_val = 0;
        posth_val = 0;

        KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));

        recursed_target(rand1);

        KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
        KUNIT_EXPECT_NE(test, 0, preh_val);
        KUNIT_EXPECT_NE(test, 0, posth_val);

        unregister_kprobe(&kp_missed);
}

#ifdef CONFIG_KRETPROBES
static u32 krph_val;

static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        KUNIT_EXPECT_FALSE(current_test, preemptible());
        krph_val = (rand1 / div_factor);
        return 0;
}

static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        unsigned long ret = regs_return_value(regs);

        KUNIT_EXPECT_FALSE(current_test, preemptible());
        KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
        KUNIT_EXPECT_NE(current_test, krph_val, 0);
        krph_val = rand1;
        return 0;
}

static struct kretprobe rp = {
        .handler = return_handler,
        .entry_handler = entry_handler,
        .kp.symbol_name = "kprobe_target"
};

static void test_kretprobe(struct kunit *test)
{
        current_test = test;
        KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
        target(rand1);
        unregister_kretprobe(&rp);
        KUNIT_EXPECT_EQ(test, krph_val, rand1);
}

static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        unsigned long ret = regs_return_value(regs);

        KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
        KUNIT_EXPECT_NE(current_test, krph_val, 0);
        krph_val = rand1;
        return 0;
}

static struct kretprobe rp2 = {
        .handler = return_handler2,
        .entry_handler = entry_handler,
        .kp.symbol_name = "kprobe_target2"
};

static void test_kretprobes(struct kunit *test)
{
        struct kretprobe *rps[2] = {&rp, &rp2};

        current_test = test;
        /* addr and flags should be cleared for reusing kprobe. */
        rp.kp.addr = NULL;
        rp.kp.flags = 0;
        KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));

        krph_val = 0;
        target(rand1);
        KUNIT_EXPECT_EQ(test, krph_val, rand1);

        krph_val = 0;
        target2(rand1);
        KUNIT_EXPECT_EQ(test, krph_val, rand1);
        unregister_kretprobes(rps, 2);
}

#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
#define STACK_BUF_SIZE 16
static unsigned long stack_buf[STACK_BUF_SIZE];

static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        unsigned long retval = regs_return_value(regs);
        int i, ret;

        KUNIT_EXPECT_FALSE(current_test, preemptible());
        KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);

        /*
         * Test stacktrace inside the kretprobe handler. This will see the
         * kretprobe trampoline, but must include the return address
         * of the target function.
         */
        ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
        KUNIT_EXPECT_NE(current_test, ret, 0);

        for (i = 0; i < ret; i++) {
                if (stack_buf[i] == target_return_address[1])
                        break;
        }
        KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
        /*
         * Test stacktrace from pt_regs at the return address. Thus the stack
         * trace must start from the target return address.
         */
        ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
        KUNIT_EXPECT_NE(current_test, ret, 0);
        KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
#endif

        return 0;
}

static struct kretprobe rp3 = {
        .handler = stacktrace_return_handler,
        .kp.symbol_name = "kprobe_stacktrace_target"
};

static void test_stacktrace_on_kretprobe(struct kunit *test)
{
        unsigned long myretaddr = (unsigned long)__builtin_return_address(0);

        current_test = test;
        rp3.kp.addr = NULL;
        rp3.kp.flags = 0;

        /*
         * Run the stacktrace_driver() to record the correct return address in
         * stacktrace_target(), and ensure stacktrace_driver() is not
         * inlined by checking that the return address of stacktrace_driver()
         * and the return address of this function are different.
         */
        KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());

        KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
        KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
        unregister_kretprobe(&rp3);
}

static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        unsigned long retval = regs_return_value(regs);
        int i, ret;

        KUNIT_EXPECT_FALSE(current_test, preemptible());
        KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);

        /*
         * Test stacktrace inside the kretprobe handler for the nested case.
         * The unwinder will find the kretprobe trampoline address on the
         * return address, and kretprobe must resolve that.
         */
        ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
        KUNIT_EXPECT_NE(current_test, ret, 0);

        for (i = 0; i < ret - 1; i++) {
                if (stack_buf[i] == target_return_address[0]) {
                        KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
                        break;
                }
        }
        KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
        /* Ditto for the regs version. */
        ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
        KUNIT_EXPECT_NE(current_test, ret, 0);
        KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
        KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
#endif

        return 0;
}

static struct kretprobe rp4 = {
        .handler = stacktrace_internal_return_handler,
        .kp.symbol_name = "kprobe_stacktrace_internal_target"
};

static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
{
        unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
        struct kretprobe *rps[2] = {&rp3, &rp4};

        current_test = test;
        rp3.kp.addr = NULL;
        rp3.kp.flags = 0;

        //KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());

        KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
        KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
        unregister_kretprobes(rps, 2);
}
#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */

#endif /* CONFIG_KRETPROBES */

static int kprobes_test_init(struct kunit *test)
{
        target = kprobe_target;
        target2 = kprobe_target2;
        recursed_target = kprobe_recursed_target;
        stacktrace_target = kprobe_stacktrace_target;
        internal_target = kprobe_stacktrace_internal_target;
        stacktrace_driver = kprobe_stacktrace_driver;
        rand1 = get_random_u32_above(div_factor);
        return 0;
}

static struct kunit_case kprobes_testcases[] = {
        KUNIT_CASE(test_kprobe),
        KUNIT_CASE(test_kprobes),
        KUNIT_CASE(test_kprobe_missed),
#ifdef CONFIG_KRETPROBES
        KUNIT_CASE(test_kretprobe),
        KUNIT_CASE(test_kretprobes),
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
        KUNIT_CASE(test_stacktrace_on_kretprobe),
        KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
#endif
#endif
        {}
};

static struct kunit_suite kprobes_test_suite = {
        .name = "kprobes_test",
        .init = kprobes_test_init,
        .test_cases = kprobes_testcases,
};

kunit_test_suites(&kprobes_test_suite);

MODULE_DESCRIPTION("simple sanity test for k*probes");
MODULE_LICENSE("GPL");
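
/*
 * Running the suite: a minimal sketch, assuming the in-tree KUnit wrapper is
 * available and that CONFIG_KPROBES_SANITY_TEST builds this test in your
 * tree. The exact flags, dependencies (e.g. CONFIG_KPROBES), and supported
 * architectures may vary, so treat this invocation as illustrative only:
 *
 *   ./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *           --kconfig_add CONFIG_KPROBES_SANITY_TEST=y kprobes_test
 */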