~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 /* Copyright (c) 2020 Facebook */
  3 #include <linux/bpf.h>
  4 #include <linux/btf.h>
  5 #include <linux/btf_ids.h>
  6 #include <linux/delay.h>
  7 #include <linux/error-injection.h>
  8 #include <linux/init.h>
  9 #include <linux/module.h>
 10 #include <linux/percpu-defs.h>
 11 #include <linux/sysfs.h>
 12 #include <linux/tracepoint.h>
 13 #include <linux/net.h>
 14 #include <linux/socket.h>
 15 #include <linux/nsproxy.h>
 16 #include <linux/inet.h>
 17 #include <linux/in.h>
 18 #include <linux/in6.h>
 19 #include <linux/un.h>
 20 #include <net/sock.h>
 21 #include <linux/namei.h>
 22 #include "bpf_testmod.h"
 23 #include "bpf_testmod_kfunc.h"
 24 
 25 #define CREATE_TRACE_POINTS
 26 #include "bpf_testmod-events.h"
 27 
 28 #define CONNECT_TIMEOUT_SEC 1
 29 
/* Nested function-pointer typedefs; emitted into BTF via BTF_TYPE_EMIT()
 * in bpf_testmod_test_btf_type_tag_user_1() so selftests can resolve them.
 */
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

/* Per-CPU ksym written by bpf_testmod_test_mod_kfunc(). */
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
/* Shared result slot the struct-argument test functions store into. */
long bpf_testmod_test_struct_arg_result;
/* Single module-wide kernel socket used by the bpf_kfunc_*sock* kfuncs
 * below; all access is serialized by sock_lock.
 */
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;
 38 
/* Argument structs of assorted sizes/layouts used to exercise passing
 * structs to functions by value and by pointer (see the
 * bpf_testmod_test_struct_arg_* functions below).
 */
struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

/* Has a flexible array member; always heap-allocated with extra room. */
struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

/* Mixed-width members to exercise padding/alignment in struct passing. */
struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};
 63 
 64 __bpf_hook_start();
 65 
 66 noinline int
 67 bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
 68         bpf_testmod_test_struct_arg_result = a.a + a.b  + b + c;
 69         return bpf_testmod_test_struct_arg_result;
 70 }
 71 
 72 noinline int
 73 bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
 74         bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
 75         return bpf_testmod_test_struct_arg_result;
 76 }
 77 
 78 noinline int
 79 bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
 80         bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
 81         return bpf_testmod_test_struct_arg_result;
 82 }
 83 
 84 noinline int
 85 bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
 86                               int c, int d, struct bpf_testmod_struct_arg_2 e) {
 87         bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
 88         return bpf_testmod_test_struct_arg_result;
 89 }
 90 
 91 noinline int
 92 bpf_testmod_test_struct_arg_5(void) {
 93         bpf_testmod_test_struct_arg_result = 1;
 94         return bpf_testmod_test_struct_arg_result;
 95 }
 96 
 97 noinline int
 98 bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
 99         bpf_testmod_test_struct_arg_result = a->b[0];
100         return bpf_testmod_test_struct_arg_result;
101 }
102 
103 noinline int
104 bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
105                               struct bpf_testmod_struct_arg_4 f)
106 {
107         bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
108                 (long)e + f.a + f.b;
109         return bpf_testmod_test_struct_arg_result;
110 }
111 
112 noinline int
113 bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
114                               struct bpf_testmod_struct_arg_4 f, int g)
115 {
116         bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
117                 (long)e + f.a + f.b + g;
118         return bpf_testmod_test_struct_arg_result;
119 }
120 
121 noinline int
122 bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
123                               short g, struct bpf_testmod_struct_arg_5 h, long i)
124 {
125         bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
126                 f + g + h.a + h.b + h.c + h.d + i;
127         return bpf_testmod_test_struct_arg_result;
128 }
129 
130 noinline int
131 bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
132         bpf_testmod_test_struct_arg_result = a->a;
133         return bpf_testmod_test_struct_arg_result;
134 }
135 
/* kfunc: store @i into this CPU's slot of the bpf_testmod_ksym_percpu
 * per-CPU variable so BPF programs can read it back as a percpu ksym.
 */
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}
141 
/* Open-coded iterator kfuncs (KF_ITER_NEW/NEXT/DESTROY): yield @value
 * exactly @cnt times.
 */
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		/* Zero the count so a subsequent next() is a clean no-op. */
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

/* Returns a pointer to the (constant) value while iterations remain,
 * NULL when exhausted.
 */
__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

/* Destroy just marks the iterator exhausted; nothing to free. */
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
169 
/* Empty kfunc stubs: only their signatures matter (argument-annotation
 * handling, e.g. the __nullable dynptr suffix, is what's under test).
 */
__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}
178 
/* Acquire kfunc (KF_ACQUIRE | KF_RET_NULL): allocate a refcounted ctx.
 * On failure, *err is set to -ENOMEM and NULL is returned.
 */
__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	/* GFP_ATOMIC: callable from non-sleepable BPF context. */
	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

/* RCU callback freeing the ctx after a grace period. */
static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

/* Release kfunc (KF_RELEASE): drop a reference; last ref frees via RCU. */
__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}
209 
/* Structs carrying BTF type tags (__user / __percpu) on pointer members,
 * used below to verify the verifier's handling of tagged pointers.
 */
struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};
221 
/* NOTE: these intentionally dereference __user/__percpu-tagged pointers
 * directly; they exist as tracing attach points so selftests can check
 * how the verifier treats the tagged ctx arguments — do not "fix" the
 * dereferences.
 */
noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	/* Force the typedef chain into this module's BTF. */
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}
244 
/* Sum 0..n-1 in a deliberately un-optimizable loop; returns the sum.
 * The many taken/not-taken branches are the point (LBR coverage).
 */
noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* the primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}
260 
/* Deliberately returns a different (mostly bogus) pointer per @arg so
 * probes reading the return value exercise every address class,
 * including ones that must fault gracefully.  __weak keeps the call
 * from being inlined/elided.
 */
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}
279 
/* fentry/fexit attach targets with 1, 2, 3, 7 and 11 arguments; each
 * returns the sum of its inputs so selftests can verify argument
 * access.  bpf_testmod_test_read() calls them with fixed inputs and
 * checks the exact sums — keep bodies and signatures in sync with it.
 */
noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

/* Set to 1 by bpf_testmod_test_read() once all fentry test calls
 * returned their expected sums; read by selftests.
 */
int bpf_testmod_fentry_ok;
310 
/* sysfs bin-file read handler and main test driver: invoking it (by
 * reading /sys/kernel/bpf_testmod) calls every attach-target function
 * above with fixed arguments so attached BPF programs can observe them.
 * Always returns -EIO; error injection is explicitly allowed.
 */
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	int i = 1;

	/* Walk every bpf_testmod_return_ptr() case until it yields NULL. */
	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	/* Allocate room for one flexible-array element of struct_arg_3. */
	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	/* Expected values are the exact argument sums of each fentry
	 * test; a mismatch (e.g. an attached program corrupting args)
	 * leaves bpf_testmod_fentry_ok at 0.
	 */
	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
384 
/* sysfs bin-file write handler: fires the bare write tracepoint with
 * the write parameters, then always fails with -EIO (the write itself
 * is never meant to succeed; only the tracepoint matters).
 */
noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
402 
403 noinline int bpf_fentry_shadow_test(int a)
404 {
405         return a + 2;
406 }
407 EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
408 
409 __bpf_hook_end();
410 
/* /sys/kernel/bpf_testmod: world read/write binary attribute wired to
 * the test read/write handlers above.
 */
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};
416 
417 /* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only,
418  * please see test_uretprobe_regs_change test
419  */
420 #ifdef __x86_64__
421 
/* uretprobe consumer: clobber selected registers with recognizable
 * values so the test_uretprobe_regs_change selftest can verify that
 * register changes made by a ret handler are observed.
 */
static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs)

{
	regs->ax  = 0x12345678deadbeef;
	regs->cx  = 0x87654321feebdaed;
	regs->r11 = (u64) -1;
	return true;
}

/* Single registered test uprobe; offset != 0 means "registered". */
struct testmod_uprobe {
	struct path path;
	loff_t offset;
	struct uprobe_consumer consumer;
};

/* Serializes register/unregister of the single uprobe slot. */
static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.ret_handler = uprobe_ret_handler,
};
444 
/* Register the single test uprobe at @offset within the writing
 * process's own executable.  Returns 0 on success, -EBUSY if one is
 * already registered, or the kern_path/uprobe registration error.
 */
static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	/* Unlocked fast-path check; re-checked under the mutex below. */
	if (uprobe.offset)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.offset)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	err = uprobe_register_refctr(d_real_inode(uprobe.path.dentry),
				     offset, 0, &uprobe.consumer);
	if (err)
		/* Drop the path ref taken above on registration failure. */
		path_put(&uprobe.path);
	else
		uprobe.offset = offset;

out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}
472 
/* Tear down the test uprobe, if registered, and release the path ref;
 * safe to call when nothing is registered.
 */
static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.offset) {
		uprobe_unregister(d_real_inode(uprobe.path.dentry),
				  uprobe.offset, &uprobe.consumer);
		path_put(&uprobe.path);
		uprobe.offset = 0;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}
486 
/* Write handler for /sys/kernel/bpf_testmod_uprobe: a non-zero offset
 * registers the uprobe at that offset, zero unregisters it.  Returns
 * the registration error or the consumed length.
 */
static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};
510 
/* Expose/remove the uprobe control file under /sys/kernel. */
static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
/* Non-x86_64: the uprobe test file is not provided; stubs keep the
 * module init/exit paths identical.
 */
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif
530 
/* kfuncs registered for all program types, with their KF_* semantics
 * (iterator trio, acquire/release pair, plain stubs).
 */
BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

/* (struct, destructor) pair for registering bpf_testmod_ctx's dtor. */
BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};
549 
/* Basic kfunc-call targets with varying argument/return widths; the
 * unused @sk parameter exercises passing a pointer-to-BTF-id argument.
 */
__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

/* Identity: returns the socket pointer it was given. */
__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}
572 
/* Static refcounted test object; .next points at itself so programs
 * can chase the pointer indefinitely.
 */
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

/* Acquire kfunc: bumps the static object's refcount and returns it.
 * @scalar_ptr is ignored; it only shapes the verifier-visible signature.
 */
__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

/* Must never actually run: programs passing an offset pointer are
 * expected to be rejected by the verifier first.
 */
__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}
591 
/* Acquire/release pair that must be rejected by the verifier before
 * being called; a WARN here means a verifier hole.
 */
__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

/* Hand out @p reinterpreted as an int buffer of at most 2 ints;
 * larger requests get NULL.  Shared by the rdwr/rdonly kfuncs below.
 */
static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}
611 
/* Read-write and read-only views over the shared test buffer. */
__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* the next 2 ones can't be really used for testing expect to ensure
 * that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}
638 
/* Empty kfunc stubs: bodies are irrelevant, only signatures matter.
 * The pass*/fail* pairs and the mem+len conventions exist so selftests
 * can check which argument shapes the verifier accepts or rejects.
 */
__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

/* The "__sz" suffix marks mem__sz as the size of @mem; the fail
 * variants deliberately lack it.
 */
__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}
692 
693 __bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
694 {
695         int proto;
696         int err;
697 
698         mutex_lock(&sock_lock);
699 
700         if (sock) {
701                 pr_err("%s called without releasing old sock", __func__);
702                 err = -EPERM;
703                 goto out;
704         }
705 
706         switch (args->af) {
707         case AF_INET:
708         case AF_INET6:
709                 proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
710                 break;
711         case AF_UNIX:
712                 proto = PF_UNIX;
713                 break;
714         default:
715                 pr_err("invalid address family %d\n", args->af);
716                 err = -EINVAL;
717                 goto out;
718         }
719 
720         err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
721                                proto, &sock);
722 
723         if (!err)
724                 /* Set timeout for call to kernel_connect() to prevent it from hanging,
725                  * and consider the connection attempt failed if it returns
726                  * -EINPROGRESS.
727                  */
728                 sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
729 out:
730         mutex_unlock(&sock_lock);
731 
732         return err;
733 }
734 
/* Sleepable kfunc: release the module-global socket, if any, and clear
 * the pointer so bpf_kfunc_init_sock() can be called again.
 */
__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}
746 
/* Sleepable kfunc: connect the module-global socket to the address in
 * @args.  Rejects oversized addrlen up front; -EPERM if no socket has
 * been initialized.  The send timeout set at init bounds this call.
 */
__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

/* Sleepable kfunc: bind the module-global socket to the address in
 * @args; same validation and -EPERM semantics as connect above.
 */
__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}
791 
/* Sleepable kfunc: put the module-global socket into listening state
 * (backlog 128); -EPERM if no socket has been initialized.
 */
__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}
810 
/* Sleepable kfunc: send @args->msg on the module-global socket via
 * kernel_sendmsg() (which sets up the iterator itself).  Bounds-checks
 * both the address and message lengths; writes the resulting
 * msg_namelen back so callers can observe it.
 */
__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

/* As above, but via sock_sendmsg(), so the kvec iterator must be set
 * up explicitly before the call.
 */
__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name	= &args->addr.addr,
		.msg_namelen	= args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len  = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}
875 
/* Sleepable kfunc: fetch the local address of the module-global socket.
 * kernel_getsockname() returns the address length on success, which is
 * stored into args->addrlen; the kfunc itself returns 0 or a -errno.
 */
__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

/* As above, but for the peer address via kernel_getpeername(). */
__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}
923 
/* kfunc id set for the kfunc-call selftests; note the KF_SLEEPABLE
 * annotations on everything that may block (socket operations).
 */
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
961 
/* ->init callback for the bpf_testmod_ops struct_ops types: nothing to
 * prepare for these test types, so always report success.
 */
static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}
966 
/* Verifier hook deciding whether a struct_ops program may access its
 * context at [off, off+size); defers entirely to the generic tracing
 * BTF context-access check.
 */
static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
974 
975 static int bpf_testmod_ops_init_member(const struct btf_type *t,
976                                        const struct btf_member *member,
977                                        void *kdata, const void *udata)
978 {
979         if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
980                 /* For data fields, this function has to copy it and return
981                  * 1 to indicate that the data has been handled by the
982                  * struct_ops type, or the verifier will reject the map if
983                  * the value of the data field is not zero.
984                  */
985                 ((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
986                 return 1;
987         }
988         return 0;
989 }
990 
/* kfunc id set registered below for SCHED_CLS/TRACING/SYSCALL programs. */
static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};
995 
/* Verifier ops shared by both testmod struct_ops types below. */
static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};
999 
1000 static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
1001 {
1002         struct bpf_testmod_ops *ops = kdata;
1003 
1004         if (ops->test_1)
1005                 ops->test_1();
1006         /* Some test cases (ex. struct_ops_maybe_null) may not have test_2
1007          * initialized, so we need to check for NULL.
1008          */
1009         if (ops->test_2)
1010                 ops->test_2(4, ops->data);
1011 
1012         return 0;
1013 }
1014 
/* ->unreg callback: nothing was retained by ->reg, so nothing to undo. */
static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}
1018 
/* CFI stub for the test_1 struct_ops member (see .cfi_stubs below). */
static int bpf_testmod_test_1(void)
{
	return 0;
}
1023 
/* CFI stub for the test_2 struct_ops member. */
static void bpf_testmod_test_2(int a, int b)
{
}
1027 
/* CFI stub for test_maybe_null; the "__nullable" parameter suffix tells
 * the verifier that programs must NULL-check the task argument.
 */
static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}
1033 
/* CFI stub table for bpf_testmod_ops (referenced by .cfi_stubs below). */
static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
};
1039 
/* struct_ops descriptor for "bpf_testmod_ops", registered in
 * bpf_testmod_init() via register_bpf_struct_ops().
 */
struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};
1050 
1051 static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
1052 {
1053         struct bpf_testmod_ops2 *ops = kdata;
1054 
1055         ops->test_1();
1056         return 0;
1057 }
1058 
/* CFI stub table for bpf_testmod_ops2. */
static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};
1062 
/* struct_ops descriptor for "bpf_testmod_ops2"; shares the verifier ops
 * and init/init_member callbacks with bpf_bpf_testmod_ops above.
 */
struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};
1073 
1074 extern int bpf_fentry_test1(int a);
1075 
1076 static int bpf_testmod_init(void)
1077 {
1078         const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
1079                 {
1080                         .btf_id         = bpf_testmod_dtor_ids[0],
1081                         .kfunc_btf_id   = bpf_testmod_dtor_ids[1]
1082                 },
1083         };
1084         int ret;
1085 
1086         ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
1087         ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
1088         ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
1089         ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
1090         ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
1091         ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
1092         ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
1093                                                  ARRAY_SIZE(bpf_testmod_dtors),
1094                                                  THIS_MODULE);
1095         if (ret < 0)
1096                 return ret;
1097         if (bpf_fentry_test1(0) < 0)
1098                 return -EINVAL;
1099         sock = NULL;
1100         mutex_init(&sock_lock);
1101         ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
1102         if (ret < 0)
1103                 return ret;
1104         ret = register_bpf_testmod_uprobe();
1105         if (ret < 0)
1106                 return ret;
1107         return 0;
1108 }
1109 
/* Module exit: wait for outstanding kfunc-test references, close the test
 * socket, then remove the sysfs file and the test uprobe (reverse of the
 * order they were set up in bpf_testmod_init()).
 */
static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release() which currently resides in kernel can
	 * be called after bpf_testmod is unloaded. Once release function is
	 * moved into the module this wait can be removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
}
1124 
/* Standard module entry/exit hookup and metadata. */
module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");
1131 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php