// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/poison.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/kasan.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback, so that the verifier can properly check the arguments.
 *
 * Different map implementations will rely on rcu in map methods
 * lookup/update/delete, therefore eBPF programs must run under rcu lock
 * if program is allowed to access maps, so check rcu_read_lock_held() or
 * rcu_read_lock_trace_held() in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func = bpf_map_lookup_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func = bpf_map_update_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_PTR_TO_MAP_VALUE,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
		     !rcu_read_lock_bh_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func = bpf_map_delete_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
};
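
/* From a BPF program these map helpers are typically invoked as, e.g.
 * (illustrative only, not part of this file; my_map/key/new_val are made up):
 *
 *	struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v)
 *		bpf_map_update_elem(&my_map, &key, &new_val, BPF_EXIST);
 */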

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func = bpf_map_push_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func = bpf_map_pop_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func = bpf_map_peek_elem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
};

BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}

const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func = bpf_map_lookup_percpu_elem,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func = bpf_user_rnd_u32,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func = bpf_get_smp_processor_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func = bpf_get_numa_node_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func = bpf_ktime_get_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func = bpf_ktime_get_boot_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_coarse_ns)
{
	return ktime_get_coarse_ns();
}

const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
	.func = bpf_ktime_get_coarse_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_tai_ns)
{
	/* NMI safe access to clock tai */
	return ktime_get_tai_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
	.func = bpf_ktime_get_tai_ns,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func = bpf_get_current_pid_tgid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};
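
/* The packed return value is typically split up in BPF programs as, e.g.
 * (illustrative only, not part of this file):
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;	// userspace-visible "process" id (tgid)
 *	u32 pid = (u32)id;	// thread id (pid)
 */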

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func = bpf_get_current_uid_gid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	/* Verifier guarantees that size > 0 */
	strscpy_pad(buf, task->comm, size);
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func = bpf_get_current_comm,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	preempt_disable();
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
	preempt_enable();
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_lock_irqsave(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func = bpf_spin_lock,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id = BPF_PTR_POISON,
};

static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
}

NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	__bpf_spin_unlock_irqrestore(lock);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func = bpf_spin_unlock,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id = BPF_PTR_POISON,
};

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->record->spin_lock_off;
	else
		lock = dst + map->record->spin_lock_off;
	preempt_disable();
	__bpf_spin_lock_irqsave(lock);
	copy_map_value(map, dst, src);
	__bpf_spin_unlock_irqrestore(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func = bpf_jiffies64,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	cgrp_id = cgroup_id(cgrp);
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func = bpf_get_current_cgroup_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp;
	struct cgroup *ancestor;
	u64 cgrp_id;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
	rcu_read_unlock();

	return cgrp_id;
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func = bpf_get_current_ancestor_cgroup_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};
#endif /* CONFIG_CGROUPS */

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}
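
/* Illustrative example (not part of this file): with flags == 0 the base is
 * auto-detected from the prefix ("0x" -> 16, leading "0" -> 8, otherwise 10),
 * so parsing the buffer "  -0x1f" with bpf_strtol() below stores -31 in *res
 * and returns 7, the number of characters consumed including the leading
 * whitespace and the sign. A non-zero base in the low bits of flags must be
 * one of 8, 10 or 16.
 */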

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   s64 *, res)
{
	long long _res;
	int err;

	*res = 0;
	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func = bpf_strtol,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
	.arg4_size = sizeof(s64),
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   u64 *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	*res = 0;
	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func = bpf_strtoul,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
	.arg4_size = sizeof(u64),
};

BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
{
	return strncmp(s1, s2, s1_sz);
}

static const struct bpf_func_proto bpf_strncmp_proto = {
	.func = bpf_strncmp,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type = ARG_CONST_SIZE,
	.arg3_type = ARG_PTR_TO_CONST_STR,
};

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func = bpf_get_ns_current_pid_tgid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func = bpf_get_raw_cpu_id,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func = bpf_event_output_data,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func = bpf_copy_from_user,
	.gpl_only = false,
	.might_sleep = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
{
	int ret;

	/* flags is not used yet */
	if (unlikely(flags))
		return -EINVAL;

	if (unlikely(!size))
		return 0;

	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
	if (ret == size)
		return 0;

	memset(dst, 0, size);
	/* Return -EFAULT for partial read */
	return ret < 0 ? ret : -EFAULT;
}

const struct bpf_func_proto bpf_copy_from_user_task_proto = {
	.func = bpf_copy_from_user_task,
	.gpl_only = true,
	.might_sleep = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_BTF_ID,
	.arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg5_type = ARG_ANYTHING
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func = bpf_per_cpu_ptr,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func = bpf_this_cpu_ptr,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
};

static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
				 size_t bufsz)
{
	void __user *user_ptr = (__force void __user *)unsafe_ptr;

	buf[0] = 0;

	switch (fmt_ptype) {
	case 's':
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
		if ((unsigned long)unsafe_ptr < TASK_SIZE)
			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
		fallthrough;
#endif
	case 'k':
		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
	case 'u':
		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
	}

	return -EINVAL;
}

/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
 * arguments representation.
 */
#define MAX_BPRINTF_BIN_ARGS	512

/* Support executing three nested bprintf helper calls on a given CPU */
#define MAX_BPRINTF_NEST_LEVEL	3
struct bpf_bprintf_buffers {
	char bin_args[MAX_BPRINTF_BIN_ARGS];
	char buf[MAX_BPRINTF_BUF];
};

static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);

static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
{
	int nest_level;

	preempt_disable();
	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
		this_cpu_dec(bpf_bprintf_nest_level);
		preempt_enable();
		return -EBUSY;
	}
	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);

	return 0;
}

void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
{
	if (!data->bin_args && !data->buf)
		return;
	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
		return;
	this_cpu_dec(bpf_bprintf_nest_level);
	preempt_enable();
}
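
/* Typical usage, as in bpf_snprintf() further below (illustrative):
 *
 *	struct bpf_bprintf_data data = { .get_bin_args = true };
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(out, out_size, fmt, data.bin_args);
 *	bpf_bprintf_cleanup(&data);
 */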

/*
 * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
 *
 * Returns a negative value if fmt is an invalid format string or 0 otherwise.
 *
 * This can be used in two ways:
 * - Format string verification only: when data->get_bin_args is false
 * - Arguments preparation: in addition to the above verification, it writes in
 *   data->bin_args a binary representation of arguments usable by bstr_printf
 *   where pointers from BPF have been sanitized.
 *
 * In argument preparation mode, if 0 is returned, safe temporary buffers are
 * allocated and bpf_bprintf_cleanup should be called to free them after use.
 */
int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data)
{
	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
	struct bpf_bprintf_buffers *buffers = NULL;
	size_t sizeof_cur_arg, sizeof_cur_ip;
	int err, i, num_spec = 0;
	u64 cur_arg;
	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";

	fmt_end = strnchr(fmt, fmt_size, 0);
	if (!fmt_end)
		return -EINVAL;
	fmt_size = fmt_end - fmt;

	if (get_buffers && try_get_buffers(&buffers))
		return -EBUSY;

	if (data->get_bin_args) {
		if (num_args)
			tmp_buf = buffers->bin_args;
		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
		data->bin_args = (u32 *)tmp_buf;
	}

	if (data->get_buf)
		data->buf = buffers->buf;

	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
			err = -EINVAL;
			goto out;
		}

		if (fmt[i] != '%')
			continue;

		if (fmt[i + 1] == '%') {
			i++;
			continue;
		}

		if (num_spec >= num_args) {
			err = -EINVAL;
			goto out;
		}

		/* The string is zero-terminated so if fmt[i] != 0, we can
		 * always access fmt[i + 1], in the worst case it will be a 0
		 */
		i++;

		/* skip optional "[0 +-][num]" width formatting field */
		while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' ||
		       fmt[i] == ' ')
			i++;
		if (fmt[i] >= '1' && fmt[i] <= '9') {
			i++;
			while (fmt[i] >= '0' && fmt[i] <= '9')
				i++;
		}

		if (fmt[i] == 'p') {
			sizeof_cur_arg = sizeof(long);

			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
			    fmt[i + 2] == 's') {
				fmt_ptype = fmt[i + 1];
				i += 2;
				goto fmt_str;
			}

			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
			    ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
			    fmt[i + 1] == 'S') {
				/* just kernel pointers */
				if (tmp_buf)
					cur_arg = raw_args[num_spec];
				i++;
				goto nocopy_fmt;
			}

			if (fmt[i + 1] == 'B') {
				if (tmp_buf) {
					err = snprintf(tmp_buf,
						       (tmp_buf_end - tmp_buf),
						       "%pB",
						       (void *)(long)raw_args[num_spec]);
					tmp_buf += (err + 1);
				}

				i++;
				num_spec++;
				continue;
			}

			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
				err = -EINVAL;
				goto out;
			}

			i += 2;
			if (!tmp_buf)
				goto nocopy_fmt;

			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
						       sizeof_cur_ip);
			if (err < 0)
				memset(cur_ip, 0, sizeof_cur_ip);

			/* hack: bstr_printf expects IP addresses to be
			 * pre-formatted as strings, ironically, the easiest way
			 * to do that is to call snprintf.
			 */
			ip_spec[2] = fmt[i - 1];
			ip_spec[3] = fmt[i];
			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
				       ip_spec, &cur_ip);

			tmp_buf += err + 1;
			num_spec++;

			continue;
		} else if (fmt[i] == 's') {
			fmt_ptype = fmt[i];
fmt_str:
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1])) {
				err = -EINVAL;
				goto out;
			}

			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			unsafe_ptr = (char *)(long)raw_args[num_spec];
			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
						    fmt_ptype,
						    tmp_buf_end - tmp_buf);
			if (err < 0) {
				tmp_buf[0] = '\0';
				err = 1;
			}

			tmp_buf += err;
			num_spec++;

			continue;
		} else if (fmt[i] == 'c') {
			if (!tmp_buf)
				goto nocopy_fmt;

			if (tmp_buf_end == tmp_buf) {
				err = -ENOSPC;
				goto out;
			}

			*tmp_buf = raw_args[num_spec];
			tmp_buf++;
			num_spec++;

			continue;
		}

		sizeof_cur_arg = sizeof(int);

		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long);
			i++;
		}
		if (fmt[i] == 'l') {
			sizeof_cur_arg = sizeof(long long);
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
		    fmt[i] != 'x' && fmt[i] != 'X') {
			err = -EINVAL;
			goto out;
		}

		if (tmp_buf)
			cur_arg = raw_args[num_spec];
nocopy_fmt:
		if (tmp_buf) {
			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
				err = -ENOSPC;
				goto out;
			}

			if (sizeof_cur_arg == 8) {
				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
			} else {
				*(u32 *)tmp_buf = (u32)(long)cur_arg;
			}
			tmp_buf += sizeof_cur_arg;
		}
		num_spec++;
	}

	err = 0;
out:
	if (err)
		bpf_bprintf_cleanup(data);
	return err;
}

BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
	   const void *, args, u32, data_len)
{
	struct bpf_bprintf_data data = {
		.get_bin_args = true,
	};
	int err, num_args;

	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !args))
		return -EINVAL;
	num_args = data_len / 8;

	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
	 * can safely give an unbounded size.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(str, str_size, fmt, data.bin_args);

	bpf_bprintf_cleanup(&data);

	return err + 1;
}

const struct bpf_func_proto bpf_snprintf_proto = {
	.func = bpf_snprintf,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_CONST_STR,
	.arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

struct bpf_async_cb {
	struct bpf_map *map;
	struct bpf_prog *prog;
	void __rcu *callback_fn;
	void *value;
	union {
		struct rcu_head rcu;
		struct work_struct delete_work;
	};
	u64 flags;
};

/* BPF map elements can contain 'struct bpf_timer'.
 * Such map owns all of its BPF timers.
 * 'struct bpf_timer' is allocated as part of map element allocation
 * and it's zero initialized.
 * That space is used to keep 'struct bpf_async_kern'.
 * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
 * remembers 'struct bpf_map *' pointer it's part of.
 * bpf_timer_set_callback() increments prog refcnt and assigns the bpf callback_fn.
 * bpf_timer_start() arms the timer.
 * If user space reference to a map goes to zero at this point
 * ops->map_release_uref callback is responsible for cancelling the timers,
 * freeing their memory, and decrementing prog's refcnts.
 * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
 * Inner maps can contain bpf timers as well. ops->map_release_uref is
 * freeing the timers when inner map is replaced or deleted by user space.
 */
struct bpf_hrtimer {
	struct bpf_async_cb cb;
	struct hrtimer timer;
	atomic_t cancelling;
};

struct bpf_work {
	struct bpf_async_cb cb;
	struct work_struct work;
	struct work_struct delete_work;
};

/* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
struct bpf_async_kern {
	union {
		struct bpf_async_cb *cb;
		struct bpf_hrtimer *timer;
		struct bpf_work *work;
	};
	/* bpf_spin_lock is used here instead of spinlock_t to make
	 * sure that it always fits into space reserved by struct bpf_timer
	 * regardless of LOCKDEP and spinlock debug flags.
	 */
	struct bpf_spin_lock lock;
} __attribute__((aligned(8)));

enum bpf_async_type {
	BPF_ASYNC_TYPE_TIMER = 0,
	BPF_ASYNC_TYPE_WQ,
};

static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);

static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
{
	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
	struct bpf_map *map = t->cb.map;
	void *value = t->cb.value;
	bpf_callback_t callback_fn;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_timer);
	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
	if (!callback_fn)
		goto out;

	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
	 * Remember the timer this callback is servicing to prevent
	 * deadlock if callback_fn() calls bpf_timer_cancel() or
	 * bpf_map_delete_elem() on the same timer.
	 */
	this_cpu_write(hrtimer_running, t);
	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
	/* The verifier checked that return value is zero. */

	this_cpu_write(hrtimer_running, NULL);
out:
	return HRTIMER_NORESTART;
}

static void bpf_wq_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, work);
	struct bpf_async_cb *cb = &w->cb;
	struct bpf_map *map = cb->map;
	bpf_callback_t callback_fn;
	void *value = cb->value;
	void *key;
	u32 idx;

	BTF_TYPE_EMIT(struct bpf_wq);

	callback_fn = READ_ONCE(cb->callback_fn);
	if (!callback_fn)
		return;

	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* compute the key */
		idx = ((char *)value - array->value) / array->elem_size;
		key = &idx;
	} else { /* hash or lru */
		key = value - round_up(map->key_size, 8);
	}

	rcu_read_lock_trace();
	migrate_disable();

	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);

	migrate_enable();
	rcu_read_unlock_trace();
}

static void bpf_wq_delete_work(struct work_struct *work)
{
	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);

	cancel_work_sync(&w->work);

	kfree_rcu(w, cb.rcu);
}

static void bpf_timer_delete_work(struct work_struct *work)
{
	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);

	/* Cancel the timer and wait for callback to complete if it was running.
	 * If hrtimer_cancel() can be safely called it's safe to call
	 * kfree_rcu(t) right after for both preallocated and non-preallocated
	 * maps. The async->cb = NULL was already done and no code path can see
	 * address 't' anymore. If the timer was armed for an existing bpf_hrtimer
	 * before bpf_timer_cancel_and_free(), it will have been cancelled.
	 */
	hrtimer_cancel(&t->timer);
	kfree_rcu(t, cb.rcu);
}

static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
			    enum bpf_async_type type)
{
	struct bpf_async_cb *cb;
	struct bpf_hrtimer *t;
	struct bpf_work *w;
	clockid_t clockid;
	size_t size;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		size = sizeof(struct bpf_hrtimer);
		break;
	case BPF_ASYNC_TYPE_WQ:
		size = sizeof(struct bpf_work);
		break;
	default:
		return -EINVAL;
	}

	__bpf_spin_lock_irqsave(&async->lock);
	t = async->timer;
	if (t) {
		ret = -EBUSY;
		goto out;
	}

	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	switch (type) {
	case BPF_ASYNC_TYPE_TIMER:
		clockid = flags & (MAX_CLOCKS - 1);
		t = (struct bpf_hrtimer *)cb;

		atomic_set(&t->cancelling, 0);
		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
		hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
		t->timer.function = bpf_timer_cb;
		cb->value = (void *)async - map->record->timer_off;
		break;
	case BPF_ASYNC_TYPE_WQ:
		w = (struct bpf_work *)cb;

		INIT_WORK(&w->work, bpf_wq_work);
		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
		cb->value = (void *)async - map->record->wq_off;
		break;
	}
	cb->map = map;
	cb->prog = NULL;
	cb->flags = flags;
	rcu_assign_pointer(cb->callback_fn, NULL);

	WRITE_ONCE(async->cb, cb);
	/* Guarantee the order between async->cb and map->usercnt. So
	 * when there are concurrent uref release and bpf timer init, either
	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
	 * timer or atomic64_read() below returns a zero usercnt.
	 */
	smp_mb();
	if (!atomic64_read(&map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs.
		 */
		WRITE_ONCE(async->cb, NULL);
		kfree(cb);
		ret = -EPERM;
	}
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}
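
/* Illustrative call from a BPF program (not part of this file; elem/my_map
 * are made-up names):
 *
 *	bpf_timer_init(&elem->t, &my_map, CLOCK_MONOTONIC);
 *
 * The clockid is carried in the low bits of the flags argument
 * (flags & (MAX_CLOCKS - 1)); only CLOCK_MONOTONIC, CLOCK_REALTIME and
 * CLOCK_BOOTTIME are accepted, see bpf_timer_init() below.
 */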

BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
	   u64, flags)
{
	clock_t clockid = flags & (MAX_CLOCKS - 1);

	BUILD_BUG_ON(MAX_CLOCKS != 16);
	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));

	if (flags >= MAX_CLOCKS ||
	    /* similar to timerfd except _ALARM variants are not supported */
	    (clockid != CLOCK_MONOTONIC &&
	     clockid != CLOCK_REALTIME &&
	     clockid != CLOCK_BOOTTIME))
		return -EINVAL;

	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_init_proto = {
	.func = bpf_timer_init,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
				    struct bpf_prog_aux *aux, unsigned int flags,
				    enum bpf_async_type type)
{
	struct bpf_prog *prev, *prog = aux->prog;
	struct bpf_async_cb *cb;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	__bpf_spin_lock_irqsave(&async->lock);
	cb = async->cb;
	if (!cb) {
		ret = -EINVAL;
		goto out;
	}
	if (!atomic64_read(&cb->map->usercnt)) {
		/* maps with timers must be either held by user space
		 * or pinned in bpffs. Otherwise timer might still be
		 * running even when bpf prog is detached and user space
		 * is gone, since map_release_uref won't ever be called.
		 */
		ret = -EPERM;
		goto out;
	}
	prev = cb->prog;
	if (prev != prog) {
		/* Bump prog refcnt once. Every bpf_timer_set_callback()
		 * can pick different callback_fn-s within the same prog.
		 */
		prog = bpf_prog_inc_not_zero(prog);
		if (IS_ERR(prog)) {
			ret = PTR_ERR(prog);
			goto out;
		}
		if (prev)
			/* Drop prev prog refcnt when swapping with new prog */
			bpf_prog_put(prev);
		cb->prog = prog;
	}
	rcu_assign_pointer(cb->callback_fn, callback_fn);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return ret;
}

BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
	   struct bpf_prog_aux *, aux)
{
	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
}

static const struct bpf_func_proto bpf_timer_set_callback_proto = {
	.func = bpf_timer_set_callback,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_PTR_TO_FUNC,
};

BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	int ret = 0;
	enum hrtimer_mode mode;

	if (in_nmi())
		return -EOPNOTSUPP;
	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
		return -EINVAL;
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t || !t->cb.prog) {
		ret = -EINVAL;
		goto out;
	}

	if (flags & BPF_F_TIMER_ABS)
		mode = HRTIMER_MODE_ABS_SOFT;
	else
		mode = HRTIMER_MODE_REL_SOFT;

	if (flags & BPF_F_TIMER_CPU_PIN)
		mode |= HRTIMER_MODE_PINNED;

	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	return ret;
}

static const struct bpf_func_proto bpf_timer_start_proto = {
	.func = bpf_timer_start,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};

static void drop_prog_refcnt(struct bpf_async_cb *async)
{
	struct bpf_prog *prog = async->prog;

	if (prog) {
		bpf_prog_put(prog);
		async->prog = NULL;
		rcu_assign_pointer(async->callback_fn, NULL);
	}
}

BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
{
	struct bpf_hrtimer *t, *cur_t;
	bool inc = false;
	int ret = 0;

	if (in_nmi())
		return -EOPNOTSUPP;
	rcu_read_lock();
	__bpf_spin_lock_irqsave(&timer->lock);
	t = timer->timer;
	if (!t) {
		ret = -EINVAL;
		goto out;
	}

	cur_t = this_cpu_read(hrtimer_running);
	if (cur_t == t) {
		/* If bpf callback_fn is trying to bpf_timer_cancel()
		 * its own timer the hrtimer_cancel() will deadlock
		 * since it waits for callback_fn to finish.
		 */
		ret = -EDEADLK;
		goto out;
	}

	/* Only account in-flight cancellations when invoked from a timer
	 * callback, since we want to avoid waiting only if other _callbacks_
	 * are waiting on us, to avoid introducing lockups. Non-callback paths
	 * are ok, since nobody would synchronously wait for their completion.
	 */
	if (!cur_t)
		goto drop;
	atomic_inc(&t->cancelling);
	/* Need full barrier after relaxed atomic_inc */
	smp_mb__after_atomic();
	inc = true;
	if (atomic_read(&cur_t->cancelling)) {
		/* We're cancelling timer t, while some other timer callback is
		 * attempting to cancel us. In such a case, it might be possible
		 * that timer t belongs to the other callback, or some other
		 * callback waiting upon it (creating transitive dependencies
		 * upon us), and we will enter a deadlock if we continue
		 * cancelling and waiting for it synchronously, since it might
		 * do the same. Bail!
		 */
		ret = -EDEADLK;
		goto out;
	}
drop:
	drop_prog_refcnt(&t->cb);
out:
	__bpf_spin_unlock_irqrestore(&timer->lock);
	/* Cancel the timer and wait for associated callback to finish
	 * if it was running.
	 */
	ret = ret ?: hrtimer_cancel(&t->timer);
	if (inc)
		atomic_dec(&t->cancelling);
	rcu_read_unlock();
	return ret;
}

static const struct bpf_func_proto bpf_timer_cancel_proto = {
	.func = bpf_timer_cancel,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_TIMER,
};

static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
{
	struct bpf_async_cb *cb;

	/* Performance optimization: read async->cb without lock first. */
	if (!READ_ONCE(async->cb))
		return NULL;

	__bpf_spin_lock_irqsave(&async->lock);
	/* re-read it under lock */
	cb = async->cb;
	if (!cb)
		goto out;
	drop_prog_refcnt(cb);
	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
	 * this timer, since it won't be initialized.
	 */
	WRITE_ONCE(async->cb, NULL);
out:
	__bpf_spin_unlock_irqrestore(&async->lock);
	return cb;
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_timer_cancel_and_free(void *val)
{
	struct bpf_hrtimer *t;

	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);

	if (!t)
		return;
	/* We check that bpf_map_delete/update_elem() was called from timer
	 * callback_fn. In such case we don't call hrtimer_cancel() (since it
	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
	 * just return -1). Though callback_fn is still running on this cpu it's
	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
	 * since async->cb = NULL was already done. The timer will be
	 * effectively cancelled because bpf_timer_cb() will return
	 * HRTIMER_NORESTART.
	 *
	 * However, it is possible the timer callback_fn calling us armed the
	 * timer _before_ calling us, such that failing to cancel it here will
	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
	 * Therefore, we _need_ to cancel any outstanding timers before we do
	 * kfree_rcu, even though no more timers can be armed.
	 *
	 * Moreover, we need to schedule work even if timer does not belong to
	 * the calling callback_fn, as on two different CPUs, we can end up in a
	 * situation where both sides run in parallel, try to cancel one
	 * another, and we end up waiting on both sides in hrtimer_cancel
	 * without making forward progress, since timer1 depends on timer2
	 * callback to finish, and vice versa.
	 *
	 *   CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
	 *   bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
	 *
	 * To avoid these issues, punt to workqueue context when we are in a
	 * timer callback.
	 */
	if (this_cpu_read(hrtimer_running))
		queue_work(system_unbound_wq, &t->cb.delete_work);
	else
		bpf_timer_delete_work(&t->cb.delete_work);
}

/* This function is called by map_delete/update_elem for individual element and
 * by ops->map_release_uref when the user space reference to a map reaches zero.
 */
void bpf_wq_cancel_and_free(void *val)
{
	struct bpf_work *work;

	BTF_TYPE_EMIT(struct bpf_wq);

	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
	if (!work)
		return;
	/* Trigger cancel of the sleepable work, but *do not* wait for
	 * it to finish if it was running as we might not be in a
	 * sleepable context.
	 * kfree will be called once the work has finished.
	 */
	schedule_work(&work->delete_work);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
{
	unsigned long *kptr = map_value;

	/* This helper may be inlined by verifier. */
	return xchg(kptr, (unsigned long)ptr);
}

/* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
 * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
 * denote type that verifier will determine.
 */
static const struct bpf_func_proto bpf_kptr_xchg_proto = {
	.func = bpf_kptr_xchg,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
	.ret_btf_id = BPF_PTR_POISON,
	.arg1_type = ARG_PTR_TO_KPTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
	.arg2_btf_id = BPF_PTR_POISON,
};

/* Since the upper 8 bits of dynptr->size are reserved, the
 * maximum supported size is 2^24 - 1.
 */
#define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
#define DYNPTR_TYPE_SHIFT	28
#define DYNPTR_SIZE_MASK	0xFFFFFF
#define DYNPTR_RDONLY_BIT	BIT(31)
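
/* Resulting layout of the dynptr 'size' field (derived from the masks above):
 *
 *   bits  0..23	actual size (DYNPTR_SIZE_MASK)
 *   bits 28..30	dynptr type (<< DYNPTR_TYPE_SHIFT)
 *   bit  31		read-only flag (DYNPTR_RDONLY_BIT)
 */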

bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_RDONLY_BIT;
}

void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
	ptr->size |= DYNPTR_RDONLY_BIT;
}

static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
{
	ptr->size |= type << DYNPTR_TYPE_SHIFT;
}

static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}

u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & DYNPTR_SIZE_MASK;
}

static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
{
	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;

	ptr->size = new_size | metadata;
}

int bpf_dynptr_check_size(u32 size)
{
	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
}

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
		     enum bpf_dynptr_type type, u32 offset, u32 size)
{
	ptr->data = data;
	ptr->offset = offset;
	ptr->size = size;
	bpf_dynptr_set_type(ptr, type);
}

void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
	memset(ptr, 0, sizeof(*ptr));
}

static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
{
	u32 size = __bpf_dynptr_size(ptr);

	if (len > size || offset > size - len)
		return -E2BIG;

	return 0;
}

BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
{
	int err;

	BTF_TYPE_EMIT(struct bpf_dynptr);

	err = bpf_dynptr_check_size(size);
	if (err)
		goto error;

	/* flags is currently unsupported */
	if (flags) {
		err = -EINVAL;
		goto error;
	}

	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);

	return 0;

error:
	bpf_dynptr_set_null(ptr);
	return err;
}

static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
	.func = bpf_dynptr_from_mem,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
};

BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
	   u32, offset, u64, flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!src->data || flags)
		return -EINVAL;

	err = bpf_dynptr_check_off_len(src, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(src);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst, src->data + src->offset + offset, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
	case BPF_DYNPTR_TYPE_XDP:
		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
	default:
		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

static const struct bpf_func_proto bpf_dynptr_read_proto = {
	.func = bpf_dynptr_read,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg4_type = ARG_ANYTHING,
	.arg5_type = ARG_ANYTHING,
};

BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
	   u32, len, u64, flags)
{
	enum bpf_dynptr_type type;
	int err;

	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
		return -EINVAL;

	err = bpf_dynptr_check_off_len(dst, offset, len);
	if (err)
		return err;

	type = bpf_dynptr_get_type(dst);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		if (flags)
			return -EINVAL;
		/* Source and destination may possibly overlap, hence use memmove to
		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
		 */
		memmove(dst->data + dst->offset + offset, src, len);
		return 0;
	case BPF_DYNPTR_TYPE_SKB:
		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
					     flags);
	case BPF_DYNPTR_TYPE_XDP:
		if (flags)
			return -EINVAL;
		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
	default:
		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
		return -EFAULT;
	}
}

static const struct bpf_func_proto bpf_dynptr_write_proto = {
	.func = bpf_dynptr_write,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type = ARG_CONST_SIZE_OR_ZERO,
	.arg5_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
{
	enum bpf_dynptr_type type;
	int err;

	if (!ptr->data)
		return 0;

	err = bpf_dynptr_check_off_len(ptr, offset, len);
	if (err)
		return 0;

	if (__bpf_dynptr_is_rdonly(ptr))
		return 0;

	type = bpf_dynptr_get_type(ptr);

	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
	case BPF_DYNPTR_TYPE_RINGBUF:
		return (unsigned long)(ptr->data + ptr->offset + offset);
	case BPF_DYNPTR_TYPE_SKB:
	case BPF_DYNPTR_TYPE_XDP:
		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
		return 0;
	default:
		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
		return 0;
	}
}

static const struct bpf_func_proto bpf_dynptr_data_proto = {
	.func = bpf_dynptr_data,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL,
	.arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
};
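
/* __weak fallback definitions: the strong definitions of these protos are
 * expected to come from the tracing code (e.g. kernel/trace/bpf_trace.c) when
 * that code is built; otherwise these zero-filled protos are used.
 */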

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
const struct bpf_func_proto bpf_task_pt_regs_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_map_lookup_percpu_elem:
		return &bpf_map_lookup_percpu_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ktime_get_tai_ns:
		return &bpf_ktime_get_tai_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	case BPF_FUNC_strncmp:
		return &bpf_strncmp_proto;
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_ns_current_pid_tgid:
		return &bpf_get_ns_current_pid_tgid_proto;
	default:
		break;
	}

	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	case BPF_FUNC_timer_init:
		return &bpf_timer_init_proto;
	case BPF_FUNC_timer_set_callback:
		return &bpf_timer_set_callback_proto;
	case BPF_FUNC_timer_start:
		return &bpf_timer_start_proto;
	case BPF_FUNC_timer_cancel:
		return &bpf_timer_cancel_proto;
	case BPF_FUNC_kptr_xchg:
		return &bpf_kptr_xchg_proto;
	case BPF_FUNC_for_each_map_elem:
		return &bpf_for_each_map_elem_proto;
	case BPF_FUNC_loop:
		return &bpf_loop_proto;
	case BPF_FUNC_user_ringbuf_drain:
		return &bpf_user_ringbuf_drain_proto;
	case BPF_FUNC_ringbuf_reserve_dynptr:
		return &bpf_ringbuf_reserve_dynptr_proto;
	case BPF_FUNC_ringbuf_submit_dynptr:
&bpf_ringbuf_submit_dynptr_proto; 1984 case BPF_FUNC_ringbuf_discard_dynptr: 1985 return &bpf_ringbuf_discard_dynptr_proto; 1986 case BPF_FUNC_dynptr_from_mem: 1987 return &bpf_dynptr_from_mem_proto; 1988 case BPF_FUNC_dynptr_read: 1989 return &bpf_dynptr_read_proto; 1990 case BPF_FUNC_dynptr_write: 1991 return &bpf_dynptr_write_proto; 1992 case BPF_FUNC_dynptr_data: 1993 return &bpf_dynptr_data_proto; 1994 #ifdef CONFIG_CGROUPS 1995 case BPF_FUNC_cgrp_storage_get: 1996 return &bpf_cgrp_storage_get_proto; 1997 case BPF_FUNC_cgrp_storage_delete: 1998 return &bpf_cgrp_storage_delete_proto; 1999 case BPF_FUNC_get_current_cgroup_id: 2000 return &bpf_get_current_cgroup_id_proto; 2001 case BPF_FUNC_get_current_ancestor_cgroup_id: 2002 return &bpf_get_current_ancestor_cgroup_id_proto; 2003 #endif 2004 default: 2005 break; 2006 } 2007 2008 if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) 2009 return NULL; 2010 2011 switch (func_id) { 2012 case BPF_FUNC_trace_printk: 2013 return bpf_get_trace_printk_proto(); 2014 case BPF_FUNC_get_current_task: 2015 return &bpf_get_current_task_proto; 2016 case BPF_FUNC_get_current_task_btf: 2017 return &bpf_get_current_task_btf_proto; 2018 case BPF_FUNC_probe_read_user: 2019 return &bpf_probe_read_user_proto; 2020 case BPF_FUNC_probe_read_kernel: 2021 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 2022 NULL : &bpf_probe_read_kernel_proto; 2023 case BPF_FUNC_probe_read_user_str: 2024 return &bpf_probe_read_user_str_proto; 2025 case BPF_FUNC_probe_read_kernel_str: 2026 return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 2027 NULL : &bpf_probe_read_kernel_str_proto; 2028 case BPF_FUNC_snprintf_btf: 2029 return &bpf_snprintf_btf_proto; 2030 case BPF_FUNC_snprintf: 2031 return &bpf_snprintf_proto; 2032 case BPF_FUNC_task_pt_regs: 2033 return &bpf_task_pt_regs_proto; 2034 case BPF_FUNC_trace_vprintk: 2035 return bpf_get_trace_vprintk_proto(); 2036 default: 2037 return NULL; 2038 } 2039 } 2040 2041 void bpf_list_head_free(const struct btf_field *field, void *list_head, 2042 struct bpf_spin_lock *spin_lock) 2043 { 2044 struct list_head *head = list_head, *orig_head = list_head; 2045 2046 BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); 2047 BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); 2048 2049 /* Do the actual list draining outside the lock to not hold the lock for 2050 * too long, and also prevent deadlocks if tracing programs end up 2051 * executing on entry/exit of functions called inside the critical 2052 * section, and end up doing map ops that call bpf_list_head_free for 2053 * the same map value again. 2054 */ 2055 __bpf_spin_lock_irqsave(spin_lock); 2056 if (!head->next || list_empty(head)) 2057 goto unlock; 2058 head = head->next; 2059 unlock: 2060 INIT_LIST_HEAD(orig_head); 2061 __bpf_spin_unlock_irqrestore(spin_lock); 2062 2063 while (head != orig_head) { 2064 void *obj = head; 2065 2066 obj -= field->graph_root.node_offset; 2067 head = head->next; 2068 /* The contained type can also have resources, including a 2069 * bpf_list_head which needs to be freed. 2070 */ 2071 migrate_disable(); 2072 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); 2073 migrate_enable(); 2074 } 2075 } 2076 2077 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are 2078 * 'rb_node *', so field name of rb_node within containing struct is not 2079 * needed. 
2080 * 2081 * Since bpf_rb_tree's node type has a corresponding struct btf_field with 2082 * graph_root.node_offset, it's not necessary to know field name 2083 * or type of node struct 2084 */ 2085 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ 2086 for (pos = rb_first_postorder(root); \ 2087 pos && ({ n = rb_next_postorder(pos); 1; }); \ 2088 pos = n) 2089 2090 void bpf_rb_root_free(const struct btf_field *field, void *rb_root, 2091 struct bpf_spin_lock *spin_lock) 2092 { 2093 struct rb_root_cached orig_root, *root = rb_root; 2094 struct rb_node *pos, *n; 2095 void *obj; 2096 2097 BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); 2098 BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); 2099 2100 __bpf_spin_lock_irqsave(spin_lock); 2101 orig_root = *root; 2102 *root = RB_ROOT_CACHED; 2103 __bpf_spin_unlock_irqrestore(spin_lock); 2104 2105 bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { 2106 obj = pos; 2107 obj -= field->graph_root.node_offset; 2108 2109 2110 migrate_disable(); 2111 __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); 2112 migrate_enable(); 2113 } 2114 } 2115 2116 __bpf_kfunc_start_defs(); 2117 2118 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) 2119 { 2120 struct btf_struct_meta *meta = meta__ign; 2121 u64 size = local_type_id__k; 2122 void *p; 2123 2124 p = bpf_mem_alloc(&bpf_global_ma, size); 2125 if (!p) 2126 return NULL; 2127 if (meta) 2128 bpf_obj_init(meta->record, p); 2129 return p; 2130 } 2131 2132 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) 2133 { 2134 u64 size = local_type_id__k; 2135 2136 /* The verifier has ensured that meta__ign must be NULL */ 2137 return bpf_mem_alloc(&bpf_global_percpu_ma, size); 2138 } 2139 2140 /* Must be called under migrate_disable(), as required by bpf_mem_free */ 2141 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) 2142 { 2143 struct bpf_mem_alloc *ma; 2144 2145 if (rec && rec->refcount_off >= 0 && 2146 !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { 2147 /* Object is refcounted and refcount_dec didn't result in 0 2148 * refcount. Return without freeing the object 2149 */ 2150 return; 2151 } 2152 2153 if (rec) 2154 bpf_obj_free_fields(rec, p); 2155 2156 if (percpu) 2157 ma = &bpf_global_percpu_ma; 2158 else 2159 ma = &bpf_global_ma; 2160 bpf_mem_free_rcu(ma, p); 2161 } 2162 2163 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) 2164 { 2165 struct btf_struct_meta *meta = meta__ign; 2166 void *p = p__alloc; 2167 2168 __bpf_obj_drop_impl(p, meta ? 
meta->record : NULL, false); 2169 } 2170 2171 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) 2172 { 2173 /* The verifier has ensured that meta__ign must be NULL */ 2174 bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc); 2175 } 2176 2177 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) 2178 { 2179 struct btf_struct_meta *meta = meta__ign; 2180 struct bpf_refcount *ref; 2181 2182 /* Could just cast directly to refcount_t *, but need some code using 2183 * bpf_refcount type so that it is emitted in vmlinux BTF 2184 */ 2185 ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); 2186 if (!refcount_inc_not_zero((refcount_t *)ref)) 2187 return NULL; 2188 2189 /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null 2190 * in verifier.c 2191 */ 2192 return (void *)p__refcounted_kptr; 2193 } 2194 2195 static int __bpf_list_add(struct bpf_list_node_kern *node, 2196 struct bpf_list_head *head, 2197 bool tail, struct btf_record *rec, u64 off) 2198 { 2199 struct list_head *n = &node->list_head, *h = (void *)head; 2200 2201 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2202 * called on its fields, so init here 2203 */ 2204 if (unlikely(!h->next)) 2205 INIT_LIST_HEAD(h); 2206 2207 /* node->owner != NULL implies !list_empty(n), no need to separately 2208 * check the latter 2209 */ 2210 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 2211 /* Only called from BPF prog, no need to migrate_disable */ 2212 __bpf_obj_drop_impl((void *)n - off, rec, false); 2213 return -EINVAL; 2214 } 2215 2216 tail ? list_add_tail(n, h) : list_add(n, h); 2217 WRITE_ONCE(node->owner, head); 2218 2219 return 0; 2220 } 2221 2222 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, 2223 struct bpf_list_node *node, 2224 void *meta__ign, u64 off) 2225 { 2226 struct bpf_list_node_kern *n = (void *)node; 2227 struct btf_struct_meta *meta = meta__ign; 2228 2229 return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); 2230 } 2231 2232 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, 2233 struct bpf_list_node *node, 2234 void *meta__ign, u64 off) 2235 { 2236 struct bpf_list_node_kern *n = (void *)node; 2237 struct btf_struct_meta *meta = meta__ign; 2238 2239 return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); 2240 } 2241 2242 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) 2243 { 2244 struct list_head *n, *h = (void *)head; 2245 struct bpf_list_node_kern *node; 2246 2247 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't 2248 * called on its fields, so init here 2249 */ 2250 if (unlikely(!h->next)) 2251 INIT_LIST_HEAD(h); 2252 if (list_empty(h)) 2253 return NULL; 2254 2255 n = tail ? 
h->prev : h->next; 2256 node = container_of(n, struct bpf_list_node_kern, list_head); 2257 if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) 2258 return NULL; 2259 2260 list_del_init(n); 2261 WRITE_ONCE(node->owner, NULL); 2262 return (struct bpf_list_node *)n; 2263 } 2264 2265 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) 2266 { 2267 return __bpf_list_del(head, false); 2268 } 2269 2270 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) 2271 { 2272 return __bpf_list_del(head, true); 2273 } 2274 2275 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, 2276 struct bpf_rb_node *node) 2277 { 2278 struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; 2279 struct rb_root_cached *r = (struct rb_root_cached *)root; 2280 struct rb_node *n = &node_internal->rb_node; 2281 2282 /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or 2283 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n) 2284 */ 2285 if (READ_ONCE(node_internal->owner) != root) 2286 return NULL; 2287 2288 rb_erase_cached(n, r); 2289 RB_CLEAR_NODE(n); 2290 WRITE_ONCE(node_internal->owner, NULL); 2291 return (struct bpf_rb_node *)n; 2292 } 2293 2294 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF 2295 * program 2296 */ 2297 static int __bpf_rbtree_add(struct bpf_rb_root *root, 2298 struct bpf_rb_node_kern *node, 2299 void *less, struct btf_record *rec, u64 off) 2300 { 2301 struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; 2302 struct rb_node *parent = NULL, *n = &node->rb_node; 2303 bpf_callback_t cb = (bpf_callback_t)less; 2304 bool leftmost = true; 2305 2306 /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately 2307 * check the latter 2308 */ 2309 if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { 2310 /* Only called from BPF prog, no need to migrate_disable */ 2311 __bpf_obj_drop_impl((void *)n - off, rec, false); 2312 return -EINVAL; 2313 } 2314 2315 while (*link) { 2316 parent = *link; 2317 if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { 2318 link = &parent->rb_left; 2319 } else { 2320 link = &parent->rb_right; 2321 leftmost = false; 2322 } 2323 } 2324 2325 rb_link_node(n, parent, link); 2326 rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost); 2327 WRITE_ONCE(node->owner, root); 2328 return 0; 2329 } 2330 2331 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, 2332 bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), 2333 void *meta__ign, u64 off) 2334 { 2335 struct btf_struct_meta *meta = meta__ign; 2336 struct bpf_rb_node_kern *n = (void *)node; 2337 2338 return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); 2339 } 2340 2341 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) 2342 { 2343 struct rb_root_cached *r = (struct rb_root_cached *)root; 2344 2345 return (struct bpf_rb_node *)rb_first_cached(r); 2346 } 2347 2348 /** 2349 * bpf_task_acquire - Acquire a reference to a task. A task acquired by this 2350 * kfunc which is not stored in a map as a kptr, must be released by calling 2351 * bpf_task_release(). 2352 * @p: The task on which a reference is being acquired. 
2353 */ 2354 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) 2355 { 2356 if (refcount_inc_not_zero(&p->rcu_users)) 2357 return p; 2358 return NULL; 2359 } 2360 2361 /** 2362 * bpf_task_release - Release the reference acquired on a task. 2363 * @p: The task on which a reference is being released. 2364 */ 2365 __bpf_kfunc void bpf_task_release(struct task_struct *p) 2366 { 2367 put_task_struct_rcu_user(p); 2368 } 2369 2370 __bpf_kfunc void bpf_task_release_dtor(void *p) 2371 { 2372 put_task_struct_rcu_user(p); 2373 } 2374 CFI_NOSEAL(bpf_task_release_dtor); 2375 2376 #ifdef CONFIG_CGROUPS 2377 /** 2378 * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by 2379 * this kfunc which is not stored in a map as a kptr, must be released by 2380 * calling bpf_cgroup_release(). 2381 * @cgrp: The cgroup on which a reference is being acquired. 2382 */ 2383 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) 2384 { 2385 return cgroup_tryget(cgrp) ? cgrp : NULL; 2386 } 2387 2388 /** 2389 * bpf_cgroup_release - Release the reference acquired on a cgroup. 2390 * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to 2391 * not be freed until the current grace period has ended, even if its refcount 2392 * drops to 0. 2393 * @cgrp: The cgroup on which a reference is being released. 2394 */ 2395 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) 2396 { 2397 cgroup_put(cgrp); 2398 } 2399 2400 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp) 2401 { 2402 cgroup_put(cgrp); 2403 } 2404 CFI_NOSEAL(bpf_cgroup_release_dtor); 2405 2406 /** 2407 * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor 2408 * array. A cgroup returned by this kfunc which is not subsequently stored in a 2409 * map, must be released by calling bpf_cgroup_release(). 2410 * @cgrp: The cgroup for which we're performing a lookup. 2411 * @level: The level of ancestor to look up. 2412 */ 2413 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) 2414 { 2415 struct cgroup *ancestor; 2416 2417 if (level > cgrp->level || level < 0) 2418 return NULL; 2419 2420 /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */ 2421 ancestor = cgrp->ancestors[level]; 2422 if (!cgroup_tryget(ancestor)) 2423 return NULL; 2424 return ancestor; 2425 } 2426 2427 /** 2428 * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this 2429 * kfunc which is not subsequently stored in a map, must be released by calling 2430 * bpf_cgroup_release(). 2431 * @cgid: cgroup id. 2432 */ 2433 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) 2434 { 2435 struct cgroup *cgrp; 2436 2437 cgrp = cgroup_get_from_id(cgid); 2438 if (IS_ERR(cgrp)) 2439 return NULL; 2440 return cgrp; 2441 } 2442 2443 /** 2444 * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test 2445 * task's membership of cgroup ancestry. 2446 * @task: the task to be tested 2447 * @ancestor: possible ancestor of @task's cgroup 2448 * 2449 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. 2450 * It follows all the same rules as cgroup_is_descendant, and only applies 2451 * to the default hierarchy. 
2452 */ 2453 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, 2454 struct cgroup *ancestor) 2455 { 2456 long ret; 2457 2458 rcu_read_lock(); 2459 ret = task_under_cgroup_hierarchy(task, ancestor); 2460 rcu_read_unlock(); 2461 return ret; 2462 } 2463 2464 /** 2465 * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a 2466 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its 2467 * hierarchy ID. 2468 * @task: The target task 2469 * @hierarchy_id: The ID of a cgroup1 hierarchy 2470 * 2471 * On success, the cgroup is returned. On failure, NULL is returned. 2472 */ 2473 __bpf_kfunc struct cgroup * 2474 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) 2475 { 2476 struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id); 2477 2478 if (IS_ERR(cgrp)) 2479 return NULL; 2480 return cgrp; 2481 } 2482 #endif /* CONFIG_CGROUPS */ 2483 2484 /** 2485 * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up 2486 * in the root pid namespace idr. If a task is returned, it must either be 2487 * stored in a map, or released with bpf_task_release(). 2488 * @pid: The pid of the task being looked up. 2489 */ 2490 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) 2491 { 2492 struct task_struct *p; 2493 2494 rcu_read_lock(); 2495 p = find_task_by_pid_ns(pid, &init_pid_ns); 2496 if (p) 2497 p = bpf_task_acquire(p); 2498 rcu_read_unlock(); 2499 2500 return p; 2501 } 2502 2503 /** 2504 * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. 2505 * @p: The dynptr whose data slice to retrieve 2506 * @offset: Offset into the dynptr 2507 * @buffer__opt: User-provided buffer to copy contents into. May be NULL 2508 * @buffer__szk: Size (in bytes) of the buffer if present. This is the 2509 * length of the requested slice. This must be a constant. 2510 * 2511 * For non-skb and non-xdp type dynptrs, there is no difference between 2512 * bpf_dynptr_slice and bpf_dynptr_data. 2513 * 2514 * If buffer__opt is NULL, the call will fail if buffer__opt was needed. 2515 * 2516 * If the intention is to write to the data slice, please use 2517 * bpf_dynptr_slice_rdwr. 2518 * 2519 * The user must check that the returned pointer is not null before using it. 2520 * 2521 * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice 2522 * does not change the underlying packet data pointers, so a call to 2523 * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in 2524 * the bpf program.
2525 * 2526 * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only 2527 * data slice (can be either direct pointer to the data or a pointer to the user 2528 * provided buffer, with its contents containing the data, if unable to obtain 2529 * direct pointer) 2530 */ 2531 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, 2532 void *buffer__opt, u32 buffer__szk) 2533 { 2534 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2535 enum bpf_dynptr_type type; 2536 u32 len = buffer__szk; 2537 int err; 2538 2539 if (!ptr->data) 2540 return NULL; 2541 2542 err = bpf_dynptr_check_off_len(ptr, offset, len); 2543 if (err) 2544 return NULL; 2545 2546 type = bpf_dynptr_get_type(ptr); 2547 2548 switch (type) { 2549 case BPF_DYNPTR_TYPE_LOCAL: 2550 case BPF_DYNPTR_TYPE_RINGBUF: 2551 return ptr->data + ptr->offset + offset; 2552 case BPF_DYNPTR_TYPE_SKB: 2553 if (buffer__opt) 2554 return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); 2555 else 2556 return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); 2557 case BPF_DYNPTR_TYPE_XDP: 2558 { 2559 void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); 2560 if (!IS_ERR_OR_NULL(xdp_ptr)) 2561 return xdp_ptr; 2562 2563 if (!buffer__opt) 2564 return NULL; 2565 bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); 2566 return buffer__opt; 2567 } 2568 default: 2569 WARN_ONCE(true, "unknown dynptr type %d\n", type); 2570 return NULL; 2571 } 2572 } 2573 2574 /** 2575 * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. 2576 * @p: The dynptr whose data slice to retrieve 2577 * @offset: Offset into the dynptr 2578 * @buffer__opt: User-provided buffer to copy contents into. May be NULL 2579 * @buffer__szk: Size (in bytes) of the buffer if present. This is the 2580 * length of the requested slice. This must be a constant. 2581 * 2582 * For non-skb and non-xdp type dynptrs, there is no difference between 2583 * bpf_dynptr_slice and bpf_dynptr_data. 2584 * 2585 * If buffer__opt is NULL, the call will fail if buffer_opt was needed. 2586 * 2587 * The returned pointer is writable and may point to either directly the dynptr 2588 * data at the requested offset or to the buffer if unable to obtain a direct 2589 * data pointer to (example: the requested slice is to the paged area of an skb 2590 * packet). In the case where the returned pointer is to the buffer, the user 2591 * is responsible for persisting writes through calling bpf_dynptr_write(). This 2592 * usually looks something like this pattern: 2593 * 2594 * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); 2595 * if (!eth) 2596 * return TC_ACT_SHOT; 2597 * 2598 * // mutate eth header // 2599 * 2600 * if (eth == buffer) 2601 * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); 2602 * 2603 * Please note that, as in the example above, the user must check that the 2604 * returned pointer is not null before using it. 2605 * 2606 * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr 2607 * does not change the underlying packet data pointers, so a call to 2608 * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in 2609 * the bpf program. 
2610 * 2611 * Return: NULL if the call failed (eg invalid dynptr), pointer to a 2612 * data slice (can be either direct pointer to the data or a pointer to the user 2613 * provided buffer, with its contents containing the data, if unable to obtain 2614 * direct pointer) 2615 */ 2616 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, 2617 void *buffer__opt, u32 buffer__szk) 2618 { 2619 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2620 2621 if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) 2622 return NULL; 2623 2624 /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. 2625 * 2626 * For skb-type dynptrs, it is safe to write into the returned pointer 2627 * if the bpf program allows skb data writes. There are two possibilities 2628 * that may occur when calling bpf_dynptr_slice_rdwr: 2629 * 2630 * 1) The requested slice is in the head of the skb. In this case, the 2631 * returned pointer is directly to skb data, and if the skb is cloned, the 2632 * verifier will have uncloned it (see bpf_unclone_prologue()) already. 2633 * The pointer can be directly written into. 2634 * 2635 * 2) Some portion of the requested slice is in the paged buffer area. 2636 * In this case, the requested data will be copied out into the buffer 2637 * and the returned pointer will be a pointer to the buffer. The skb 2638 * will not be pulled. To persist the write, the user will need to call 2639 * bpf_dynptr_write(), which will pull the skb and commit the write. 2640 * 2641 * Similarly for xdp programs, if the requested slice is not across xdp 2642 * fragments, then a direct pointer will be returned, otherwise the data 2643 * will be copied out into the buffer and the user will need to call 2644 * bpf_dynptr_write() to commit changes. 
2645 */ 2646 return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk); 2647 } 2648 2649 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) 2650 { 2651 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2652 u32 size; 2653 2654 if (!ptr->data || start > end) 2655 return -EINVAL; 2656 2657 size = __bpf_dynptr_size(ptr); 2658 2659 if (start > size || end > size) 2660 return -ERANGE; 2661 2662 ptr->offset += start; 2663 bpf_dynptr_set_size(ptr, end - start); 2664 2665 return 0; 2666 } 2667 2668 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p) 2669 { 2670 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2671 2672 return !ptr->data; 2673 } 2674 2675 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p) 2676 { 2677 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2678 2679 if (!ptr->data) 2680 return false; 2681 2682 return __bpf_dynptr_is_rdonly(ptr); 2683 } 2684 2685 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p) 2686 { 2687 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2688 2689 if (!ptr->data) 2690 return -EINVAL; 2691 2692 return __bpf_dynptr_size(ptr); 2693 } 2694 2695 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p, 2696 struct bpf_dynptr *clone__uninit) 2697 { 2698 struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit; 2699 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2700 2701 if (!ptr->data) { 2702 bpf_dynptr_set_null(clone); 2703 return -EINVAL; 2704 } 2705 2706 *clone = *ptr; 2707 2708 return 0; 2709 } 2710 2711 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) 2712 { 2713 return obj; 2714 } 2715 2716 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k) 2717 { 2718 return (void *)obj__ign; 2719 } 2720 2721 __bpf_kfunc void bpf_rcu_read_lock(void) 2722 { 2723 rcu_read_lock(); 2724 } 2725 2726 __bpf_kfunc void bpf_rcu_read_unlock(void) 2727 { 2728 rcu_read_unlock(); 2729 } 2730 2731 struct bpf_throw_ctx { 2732 struct bpf_prog_aux *aux; 2733 u64 sp; 2734 u64 bp; 2735 int cnt; 2736 }; 2737 2738 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp) 2739 { 2740 struct bpf_throw_ctx *ctx = cookie; 2741 struct bpf_prog *prog; 2742 2743 if (!is_bpf_text_address(ip)) 2744 return !ctx->cnt; 2745 prog = bpf_prog_ksym_find(ip); 2746 ctx->cnt++; 2747 if (bpf_is_subprog(prog)) 2748 return true; 2749 ctx->aux = prog->aux; 2750 ctx->sp = sp; 2751 ctx->bp = bp; 2752 return false; 2753 } 2754 2755 __bpf_kfunc void bpf_throw(u64 cookie) 2756 { 2757 struct bpf_throw_ctx ctx = {}; 2758 2759 arch_bpf_stack_walk(bpf_stack_walker, &ctx); 2760 WARN_ON_ONCE(!ctx.aux); 2761 if (ctx.aux) 2762 WARN_ON_ONCE(!ctx.aux->exception_boundary); 2763 WARN_ON_ONCE(!ctx.bp); 2764 WARN_ON_ONCE(!ctx.cnt); 2765 /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning 2766 * deeper stack depths than ctx.sp as we do not return from bpf_throw, 2767 * which skips compiler generated instrumentation to do the same. 
2768 */ 2769 kasan_unpoison_task_stack_below((void *)(long)ctx.sp); 2770 ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); 2771 WARN(1, "A call to BPF exception callback should never return\n"); 2772 } 2773 2774 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) 2775 { 2776 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 2777 struct bpf_map *map = p__map; 2778 2779 BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq)); 2780 BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq)); 2781 2782 if (flags) 2783 return -EINVAL; 2784 2785 return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); 2786 } 2787 2788 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) 2789 { 2790 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 2791 struct bpf_work *w; 2792 2793 if (in_nmi()) 2794 return -EOPNOTSUPP; 2795 if (flags) 2796 return -EINVAL; 2797 w = READ_ONCE(async->work); 2798 if (!w || !READ_ONCE(w->cb.prog)) 2799 return -EINVAL; 2800 2801 schedule_work(&w->work); 2802 return 0; 2803 } 2804 2805 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, 2806 int (callback_fn)(void *map, int *key, void *value), 2807 unsigned int flags, 2808 void *aux__ign) 2809 { 2810 struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign; 2811 struct bpf_async_kern *async = (struct bpf_async_kern *)wq; 2812 2813 if (flags) 2814 return -EINVAL; 2815 2816 return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); 2817 } 2818 2819 __bpf_kfunc void bpf_preempt_disable(void) 2820 { 2821 preempt_disable(); 2822 } 2823 2824 __bpf_kfunc void bpf_preempt_enable(void) 2825 { 2826 preempt_enable(); 2827 } 2828 2829 struct bpf_iter_bits { 2830 __u64 __opaque[2]; 2831 } __aligned(8); 2832 2833 struct bpf_iter_bits_kern { 2834 union { 2835 unsigned long *bits; 2836 unsigned long bits_copy; 2837 }; 2838 u32 nr_bits; 2839 int bit; 2840 } __aligned(8); 2841 2842 /** 2843 * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area 2844 * @it: The new bpf_iter_bits to be created 2845 * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over 2846 * @nr_words: The size of the specified memory area, measured in 8-byte units. 2847 * Due to the limitation of memalloc, it can't be greater than 512. 2848 * 2849 * This function initializes a new bpf_iter_bits structure for iterating over 2850 * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It 2851 * copies the data of the memory area to the newly created bpf_iter_bits @it for 2852 * subsequent iteration operations. 2853 * 2854 * On success, 0 is returned. On failure, ERR is returned. 
2855 */ 2856 __bpf_kfunc int 2857 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) 2858 { 2859 struct bpf_iter_bits_kern *kit = (void *)it; 2860 u32 nr_bytes = nr_words * sizeof(u64); 2861 u32 nr_bits = BYTES_TO_BITS(nr_bytes); 2862 int err; 2863 2864 BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits)); 2865 BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) != 2866 __alignof__(struct bpf_iter_bits)); 2867 2868 kit->nr_bits = 0; 2869 kit->bits_copy = 0; 2870 kit->bit = -1; 2871 2872 if (!unsafe_ptr__ign || !nr_words) 2873 return -EINVAL; 2874 2875 /* Optimization for u64 mask */ 2876 if (nr_bits == 64) { 2877 err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); 2878 if (err) 2879 return -EFAULT; 2880 2881 kit->nr_bits = nr_bits; 2882 return 0; 2883 } 2884 2885 /* Fallback to memalloc */ 2886 kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); 2887 if (!kit->bits) 2888 return -ENOMEM; 2889 2890 err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); 2891 if (err) { 2892 bpf_mem_free(&bpf_global_ma, kit->bits); 2893 return err; 2894 } 2895 2896 kit->nr_bits = nr_bits; 2897 return 0; 2898 } 2899 2900 /** 2901 * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits 2902 * @it: The bpf_iter_bits to be checked 2903 * 2904 * This function returns a pointer to a number representing the value of the 2905 * next bit in the bits. 2906 * 2907 * If there are no further bits available, it returns NULL. 2908 */ 2909 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it) 2910 { 2911 struct bpf_iter_bits_kern *kit = (void *)it; 2912 u32 nr_bits = kit->nr_bits; 2913 const unsigned long *bits; 2914 int bit; 2915 2916 if (nr_bits == 0) 2917 return NULL; 2918 2919 bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; 2920 bit = find_next_bit(bits, nr_bits, kit->bit + 1); 2921 if (bit >= nr_bits) { 2922 kit->nr_bits = 0; 2923 return NULL; 2924 } 2925 2926 kit->bit = bit; 2927 return &kit->bit; 2928 } 2929 2930 /** 2931 * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits 2932 * @it: The bpf_iter_bits to be destroyed 2933 * 2934 * Destroy the resource associated with the bpf_iter_bits. 
2935 */ 2936 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it) 2937 { 2938 struct bpf_iter_bits_kern *kit = (void *)it; 2939 2940 if (kit->nr_bits <= 64) 2941 return; 2942 bpf_mem_free(&bpf_global_ma, kit->bits); 2943 } 2944 2945 __bpf_kfunc_end_defs(); 2946 2947 BTF_KFUNCS_START(generic_btf_ids) 2948 #ifdef CONFIG_CRASH_DUMP 2949 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) 2950 #endif 2951 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 2952 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) 2953 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) 2954 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE) 2955 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU) 2956 BTF_ID_FLAGS(func, bpf_list_push_front_impl) 2957 BTF_ID_FLAGS(func, bpf_list_push_back_impl) 2958 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) 2959 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) 2960 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 2961 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) 2962 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) 2963 BTF_ID_FLAGS(func, bpf_rbtree_add_impl) 2964 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) 2965 2966 #ifdef CONFIG_CGROUPS 2967 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 2968 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) 2969 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 2970 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) 2971 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) 2972 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL) 2973 #endif 2974 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) 2975 BTF_ID_FLAGS(func, bpf_throw) 2976 BTF_KFUNCS_END(generic_btf_ids) 2977 2978 static const struct btf_kfunc_id_set generic_kfunc_set = { 2979 .owner = THIS_MODULE, 2980 .set = &generic_btf_ids, 2981 }; 2982 2983 2984 BTF_ID_LIST(generic_dtor_ids) 2985 BTF_ID(struct, task_struct) 2986 BTF_ID(func, bpf_task_release_dtor) 2987 #ifdef CONFIG_CGROUPS 2988 BTF_ID(struct, cgroup) 2989 BTF_ID(func, bpf_cgroup_release_dtor) 2990 #endif 2991 2992 BTF_KFUNCS_START(common_btf_ids) 2993 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) 2994 BTF_ID_FLAGS(func, bpf_rdonly_cast) 2995 BTF_ID_FLAGS(func, bpf_rcu_read_lock) 2996 BTF_ID_FLAGS(func, bpf_rcu_read_unlock) 2997 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) 2998 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) 2999 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) 3000 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) 3001 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) 3002 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) 3003 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) 3004 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) 3005 #ifdef CONFIG_CGROUPS 3006 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) 3007 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) 3008 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) 3009 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 3010 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) 3011 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) 3012 #endif 3013 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | 
KF_TRUSTED_ARGS | KF_RCU_PROTECTED) 3014 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) 3015 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) 3016 BTF_ID_FLAGS(func, bpf_dynptr_adjust) 3017 BTF_ID_FLAGS(func, bpf_dynptr_is_null) 3018 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) 3019 BTF_ID_FLAGS(func, bpf_dynptr_size) 3020 BTF_ID_FLAGS(func, bpf_dynptr_clone) 3021 BTF_ID_FLAGS(func, bpf_modify_return_test_tp) 3022 BTF_ID_FLAGS(func, bpf_wq_init) 3023 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) 3024 BTF_ID_FLAGS(func, bpf_wq_start) 3025 BTF_ID_FLAGS(func, bpf_preempt_disable) 3026 BTF_ID_FLAGS(func, bpf_preempt_enable) 3027 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) 3028 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) 3029 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) 3030 BTF_KFUNCS_END(common_btf_ids) 3031 3032 static const struct btf_kfunc_id_set common_kfunc_set = { 3033 .owner = THIS_MODULE, 3034 .set = &common_btf_ids, 3035 }; 3036 3037 static int __init kfunc_init(void) 3038 { 3039 int ret; 3040 const struct btf_id_dtor_kfunc generic_dtors[] = { 3041 { 3042 .btf_id = generic_dtor_ids[0], 3043 .kfunc_btf_id = generic_dtor_ids[1] 3044 }, 3045 #ifdef CONFIG_CGROUPS 3046 { 3047 .btf_id = generic_dtor_ids[2], 3048 .kfunc_btf_id = generic_dtor_ids[3] 3049 }, 3050 #endif 3051 }; 3052 3053 ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set); 3054 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set); 3055 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set); 3056 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); 3057 ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set); 3058 ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, 3059 ARRAY_SIZE(generic_dtors), 3060 THIS_MODULE); 3061 return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); 3062 } 3063 3064 late_initcall(kfunc_init); 3065 3066 /* Get a pointer to dynptr data up to len bytes for read only access. If 3067 * the dynptr doesn't have continuous data up to len bytes, return NULL. 3068 */ 3069 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) 3070 { 3071 const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr; 3072 3073 return bpf_dynptr_slice(p, 0, NULL, len); 3074 } 3075 3076 /* Get a pointer to dynptr data up to len bytes for read write access. If 3077 * the dynptr doesn't have continuous data up to len bytes, or the dynptr 3078 * is read only, return NULL. 3079 */ 3080 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len) 3081 { 3082 if (__bpf_dynptr_is_rdonly(ptr)) 3083 return NULL; 3084 return (void *)__bpf_dynptr_data(ptr, len); 3085 } 3086
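/*
 * Illustrative usage sketch, not part of helpers.c: a BPF classifier
 * following the bpf_dynptr_slice_rdwr() pattern documented above. The
 * section name, program name, and the choice of mutating h_dest are
 * assumptions for illustration only; the kfunc declarations mirror the
 * way selftests declare them.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define TC_ACT_OK	0
#define TC_ACT_SHOT	2

extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
			       struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, __u32 offset,
				   void *buffer__opt, __u32 buffer__szk) __ksym;

SEC("tc")
int rewrite_eth(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr buffer, *eth;

	if (bpf_dynptr_from_skb(skb, 0, &ptr))
		return TC_ACT_SHOT;

	/* Writable slice: either a direct pointer into the linear skb data,
	 * or a copy placed in 'buffer' when the bytes live in the paged area.
	 */
	eth = bpf_dynptr_slice_rdwr(&ptr, 0, &buffer, sizeof(buffer));
	if (!eth)
		return TC_ACT_SHOT;

	eth->h_dest[0] ^= 1;	/* mutate the header */

	/* If the slice was copied into the local buffer, persist the write. */
	if (eth == &buffer)
		bpf_dynptr_write(&ptr, 0, &buffer, sizeof(buffer), 0);

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";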
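/*
 * Illustrative usage sketch, not part of helpers.c: consuming the bits
 * iterator kfuncs from a tracing program to count the CPUs set in a task's
 * cpus_mask. The tracepoint, program name, and the open-coded while loop
 * are assumptions; the same new/next/destroy sequence is usually wrapped in
 * a for-each style macro. Only the first 64-bit word of the mask is walked.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern int bpf_iter_bits_new(struct bpf_iter_bits *it,
			     const __u64 *unsafe_ptr__ign, __u32 nr_words) __ksym;
extern int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
extern void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

SEC("tp_btf/sched_process_fork")
int BPF_PROG(count_child_cpus, struct task_struct *parent,
	     struct task_struct *child)
{
	struct bpf_iter_bits it;
	int nr = 0, *bit;

	/* Copy one 8-byte word of the mask into the iterator, then walk it. */
	bpf_iter_bits_new(&it, (const __u64 *)child->cpus_mask.bits, 1);
	while ((bit = bpf_iter_bits_next(&it)))
		nr++;
	bpf_iter_bits_destroy(&it);

	bpf_printk("pid %d: %d allowed cpus in the first word", child->pid, nr);
	return 0;
}

char _license[] SEC("license") = "GPL";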
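/*
 * Illustrative usage sketch, not part of helpers.c: taking and dropping a
 * task reference with the task kfuncs above from a BPF_PROG_TYPE_SYSCALL
 * program (one of the program types generic_kfunc_set is registered for in
 * kfunc_init()). The pid value and program name are assumptions.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
extern void bpf_task_release(struct task_struct *p) __ksym;

SEC("syscall")
int dump_init_comm(void *ctx)
{
	struct task_struct *p;

	p = bpf_task_from_pid(1);	/* pid 1 picked for illustration */
	if (!p)
		return 0;

	bpf_printk("pid 1 comm: %s", p->comm);

	/* A task returned by bpf_task_from_pid() must be released (or stored
	 * in a map as a kptr) before the program exits.
	 */
	bpf_task_release(p);
	return 0;
}

char _license[] SEC("license") = "GPL";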