// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
#include <linux/bpf_verifier.h>
#include <linux/bsearch.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/bpf-netns.h>
#include <linux/rcupdate_trace.h>
#include <linux/memcontrol.h>
#include <linux/trace_events.h>

#include <net/netfilter/nf_bpf_link.h>
#include <net/netkit.h>
#include <net/tcx.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
			IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly =
	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU between this function call and the following
 * copy_from_user() call. However, this is not a concern since this function is
 * meant to be a future-proofing of bits.
 */
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
			     size_t expected_size,
			     size_t actual_size)
{
	int res;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (actual_size <= expected_size)
		return 0;

	if (uaddr.is_kernel)
		res = memchr_inv(uaddr.kernel + expected_size, 0,
				 actual_size - expected_size) == NULL;
	else
		res = check_zeroed_user(uaddr.user + expected_size,
					actual_size - expected_size);
	if (res < 0)
		return res;
	return res ? 0 : -E2BIG;
}
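/*
 * Illustrative sketch (not part of the kernel sources): callers typically
 * pair the tail-zero check with a bounded copy of the known struct layout.
 * The names "uattr" and "attr" below are placeholders for whatever the
 * caller actually uses; the shape of the pattern is the point:
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 *	size = min_t(u32, size, sizeof(attr));
 *	memset(&attr, 0, sizeof(attr));
 *	if (copy_from_bpfptr(&attr, uattr, size))
 *		return -EFAULT;
 */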
const struct bpf_map_ops bpf_map_offload_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bpf_map_offload_map_alloc,
	.map_free = bpf_map_offload_map_free,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = bpf_map_offload_map_mem_usage,
};

static void bpf_map_write_active_inc(struct bpf_map *map)
{
	atomic64_inc(&map->writecnt);
}

static void bpf_map_write_active_dec(struct bpf_map *map)
{
	atomic64_dec(&map->writecnt);
}

bool bpf_map_write_active(const struct bpf_map *map)
{
	return atomic64_read(&map->writecnt) != 0;
}

static u32 bpf_map_value_size(const struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		return sizeof(u32);
	else
		return map->value_size;
}

static void maybe_wait_bpf_programs(struct bpf_map *map)
{
	/* Wait for any running non-sleepable BPF programs to complete so that
	 * userspace, when we return to it, knows that all non-sleepable
	 * programs that could be running use the new map value. For sleepable
	 * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
	 * for their completion, but the waiting time can be very long and
	 * userspace may think the syscall hangs forever, so sleepable BPF
	 * programs are not handled here for now.
	 */
	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
		synchronize_rcu();
}

static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
				void *key, void *value, __u64 flags)
{
	int err;

	/* Need to create a kthread, thus must support schedule */
	if (bpf_map_is_offloaded(map)) {
		return bpf_map_offload_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
		   map->map_type == BPF_MAP_TYPE_ARENA ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		return map->ops->map_update_elem(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		return sock_map_update_elem_sys(map, key, value, flags);
	} else if (IS_FD_PROG_ARRAY(map)) {
		return bpf_fd_array_map_update_elem(map, map_file, key, value,
						    flags);
	}

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_update(map, key, value,
						       flags);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
						   flags);
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
						  flags);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		/* rcu_read_lock() is not needed */
		err = bpf_fd_reuseport_array_update_elem(map, key, value,
							 flags);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_push_elem(map, value, flags);
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, flags);
		rcu_read_unlock();
	}
	bpf_enable_instrumentation();

	return err;
}
static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
			      __u64 flags)
{
	void *ptr;
	int err;

	if (bpf_map_is_offloaded(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	bpf_disable_instrumentation();
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
		   map->map_type == BPF_MAP_TYPE_STACK ||
		   map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		err = map->ops->map_peek_elem(map, value);
	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* struct_ops map requires directly updating "value" */
		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		if (map->ops->map_lookup_elem_sys_only)
			ptr = map->ops->map_lookup_elem_sys_only(map, key);
		else
			ptr = map->ops->map_lookup_elem(map, key);
		if (IS_ERR(ptr)) {
			err = PTR_ERR(ptr);
		} else if (!ptr) {
			err = -ENOENT;
		} else {
			err = 0;
			if (flags & BPF_F_LOCK)
				/* lock 'ptr' and copy everything but lock */
				copy_map_value_locked(map, value, ptr, true);
			else
				copy_map_value(map, value, ptr);
			/* mask lock and timer, since value wasn't zero inited */
			check_and_init_map_value(map, value);
		}
		rcu_read_unlock();
	}

	bpf_enable_instrumentation();

	return err;
}
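/*
 * Illustrative sketch (not part of the kernel sources): for the per-CPU map
 * types handled above, bpf_map_value_size() tells the syscall path that the
 * user-visible value is one 8-byte-aligned slot per possible CPU. A syscall
 * caller's buffer sizing would therefore look roughly like this, where
 * "ncpus" is assumed to come from libbpf_num_possible_cpus() or similar:
 *
 *	size_t slot = roundup(value_size, 8);
 *	void *buf = malloc(slot * ncpus);	// one slot per possible CPU
 */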
/* Please do not use this function outside of the map creation path
 * (e.g. in the map update path) without taking care of setting the active
 * memory cgroup (see bpf_map_kmalloc_node() for an example).
 */
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
{
	/* We really just want to fail instead of triggering OOM killer
	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
	 * which is used for lower order allocation requests.
	 *
	 * It has been observed that higher order allocation requests done by
	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
	 * to reclaim memory from the page cache, thus we set
	 * __GFP_RETRY_MAYFAIL to avoid such situations.
	 */

	gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO);
	unsigned int flags = 0;
	unsigned long align = 1;
	void *area;

	if (size >= SIZE_MAX)
		return NULL;

	/* kmalloc()'ed memory can't be mmap()'ed */
	if (mmapable) {
		BUG_ON(!PAGE_ALIGNED(size));
		align = SHMLBA;
		flags = VM_USERMAP;
	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
				    numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
			flags, numa_node, __builtin_return_address(0));
}

void *bpf_map_area_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, false);
}

void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
{
	return __bpf_map_area_alloc(size, numa_node, true);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

static u32 bpf_map_flags_retain_permanent(u32 flags)
{
	/* Some map creation flags are not tied to the map object but
	 * rather to the map fd instead, so they have no meaning upon
	 * map object inspection since multiple file descriptors with
	 * different (access) properties can exist here. Thus, given
	 * this has zero meaning for the map itself, lets clear these
	 * from here.
	 */
	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}

void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

void bpf_map_free_id(struct bpf_map *map)
{
	unsigned long flags;

	/* Offloaded maps are removed from the IDR store when their device
	 * disappears - even if someone holds an fd to them they are unusable,
	 * the memory is gone, all ops will fail; they are simply waiting for
	 * refcnt to drop to be freed.
	 */
	if (!map->id)
		return;

	spin_lock_irqsave(&map_idr_lock, flags);

	idr_remove(&map_idr, map->id);
	map->id = 0;

	spin_unlock_irqrestore(&map_idr_lock, flags);
}
#ifdef CONFIG_MEMCG
static void bpf_map_save_memcg(struct bpf_map *map)
{
	/* Currently if a map is created by a process belonging to the root
	 * memory cgroup, get_obj_cgroup_from_current() will return NULL.
	 * So we have to check map->objcg for being NULL each time it's
	 * being used.
	 */
	if (memcg_bpf_enabled())
		map->objcg = get_obj_cgroup_from_current();
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
	if (map->objcg)
		obj_cgroup_put(map->objcg);
}

static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
{
	if (map->objcg)
		return get_mem_cgroup_from_objcg(map->objcg);

	return root_mem_cgroup;
}

void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kzalloc(size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
		       gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *memcg, *old_memcg;
	void __percpu *ptr;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);

	return ptr;
}

#else
static void bpf_map_save_memcg(struct bpf_map *map)
{
}

static void bpf_map_release_memcg(struct bpf_map *map)
{
}
#endif

int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
			unsigned long nr_pages, struct page **pages)
{
	unsigned long i, j;
	struct page *pg;
	int ret = 0;
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg, *old_memcg;

	memcg = bpf_map_get_memcg(map);
	old_memcg = set_active_memcg(memcg);
#endif
	for (i = 0; i < nr_pages; i++) {
		pg = alloc_pages_node(nid, gfp | __GFP_ACCOUNT, 0);

		if (pg) {
			pages[i] = pg;
			continue;
		}
		for (j = 0; j < i; j++)
			__free_page(pages[j]);
		ret = -ENOMEM;
		break;
	}

#ifdef CONFIG_MEMCG
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
#endif
	return ret;
}

static int btf_field_cmp(const void *a, const void *b)
{
	const struct btf_field *f1 = a, *f2 = b;

	if (f1->offset < f2->offset)
		return -1;
	else if (f1->offset > f2->offset)
		return 1;
	return 0;
}

struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
				  u32 field_mask)
{
	struct btf_field *field;

	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
		return NULL;
	field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
	if (!field || !(field->type & field_mask))
		return NULL;
	return field;
}
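/*
 * Illustrative sketch (not part of the kernel sources): btf_record_find()
 * relies on rec->fields being sorted by offset, so a caller that wants to
 * know whether a given value offset holds, say, a referenced kptr could do
 * something along these lines (the "off" variable is a placeholder):
 *
 *	const struct btf_field *f;
 *
 *	f = btf_record_find(map->record, off, BPF_KPTR_REF);
 *	if (f)
 *		// off is exactly the start of a BPF_KPTR_REF field
 */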
void btf_record_free(struct btf_record *rec)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++) {
		switch (rec->fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			if (rec->fields[i].kptr.module)
				module_put(rec->fields[i].kptr.module);
			btf_put(rec->fields[i].kptr.btf);
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to release */
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
	kfree(rec);
}

void bpf_map_free_record(struct bpf_map *map)
{
	btf_record_free(map->record);
	map->record = NULL;
}

struct btf_record *btf_record_dup(const struct btf_record *rec)
{
	const struct btf_field *fields;
	struct btf_record *new_rec;
	int ret, size, i;

	if (IS_ERR_OR_NULL(rec))
		return NULL;
	size = offsetof(struct btf_record, fields[rec->cnt]);
	new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN);
	if (!new_rec)
		return ERR_PTR(-ENOMEM);
	/* Do a deep copy of the btf_record */
	fields = rec->fields;
	new_rec->cnt = 0;
	for (i = 0; i < rec->cnt; i++) {
		switch (fields[i].type) {
		case BPF_KPTR_UNREF:
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			btf_get(fields[i].kptr.btf);
			if (fields[i].kptr.module && !try_module_get(fields[i].kptr.module)) {
				ret = -ENXIO;
				goto free;
			}
			break;
		case BPF_LIST_HEAD:
		case BPF_LIST_NODE:
		case BPF_RB_ROOT:
		case BPF_RB_NODE:
		case BPF_SPIN_LOCK:
		case BPF_TIMER:
		case BPF_REFCOUNT:
		case BPF_WORKQUEUE:
			/* Nothing to acquire */
			break;
		default:
			ret = -EFAULT;
			WARN_ON_ONCE(1);
			goto free;
		}
		new_rec->cnt++;
	}
	return new_rec;
free:
	btf_record_free(new_rec);
	return ERR_PTR(ret);
}
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b)
{
	bool a_has_fields = !IS_ERR_OR_NULL(rec_a), b_has_fields = !IS_ERR_OR_NULL(rec_b);
	int size;

	if (!a_has_fields && !b_has_fields)
		return true;
	if (a_has_fields != b_has_fields)
		return false;
	if (rec_a->cnt != rec_b->cnt)
		return false;
	size = offsetof(struct btf_record, fields[rec_a->cnt]);
	/* btf_parse_fields uses kzalloc to allocate a btf_record, so unused
	 * members are zeroed out. So memcmp is safe to do without worrying
	 * about padding/unused fields.
	 *
	 * While spin_lock, timer, and kptr have no relation to map BTF,
	 * list_head metadata is specific to map BTF, the btf and value_rec
	 * members in particular. btf is the map BTF, while value_rec points to
	 * btf_record in that map BTF.
	 *
	 * So while by default, we don't rely on the map BTF (which the records
	 * were parsed from) matching for both records, which is not backwards
	 * compatible, in case list_head is part of it, we implicitly rely on
	 * that by way of depending on memcmp succeeding for it.
	 */
	return !memcmp(rec_a, rec_b, size);
}

void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_TIMER)))
		return;
	bpf_timer_cancel_and_free(obj + rec->timer_off);
}

void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
{
	if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
		return;
	bpf_wq_cancel_and_free(obj + rec->wq_off);
}

void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
	const struct btf_field *fields;
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	fields = rec->fields;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_struct_meta *pointee_struct_meta;
		const struct btf_field *field = &fields[i];
		void *field_ptr = obj + field->offset;
		void *xchgd_field;

		switch (fields[i].type) {
		case BPF_SPIN_LOCK:
			break;
		case BPF_TIMER:
			bpf_timer_cancel_and_free(field_ptr);
			break;
		case BPF_WORKQUEUE:
			bpf_wq_cancel_and_free(field_ptr);
			break;
		case BPF_KPTR_UNREF:
			WRITE_ONCE(*(u64 *)field_ptr, 0);
			break;
		case BPF_KPTR_REF:
		case BPF_KPTR_PERCPU:
			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
			if (!xchgd_field)
				break;

			if (!btf_is_kernel(field->kptr.btf)) {
				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
									   field->kptr.btf_id);
				migrate_disable();
				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
								 pointee_struct_meta->record : NULL,
								 fields[i].type == BPF_KPTR_PERCPU);
				migrate_enable();
			} else {
				field->kptr.dtor(xchgd_field);
			}
			break;
		case BPF_LIST_HEAD:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_RB_ROOT:
			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
				continue;
			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
			break;
		case BPF_LIST_NODE:
		case BPF_RB_NODE:
		case BPF_REFCOUNT:
			break;
		default:
			WARN_ON_ONCE(1);
			continue;
		}
	}
}

static void bpf_map_free(struct bpf_map *map)
{
	struct btf_record *rec = map->record;
	struct btf *btf = map->btf;

	/* implementation dependent freeing */
	map->ops->map_free(map);
	/* Delay freeing of btf_record for maps, as map_free
	 * callback usually needs access to them. It is better to do it here
	 * than require each callback to do the free itself manually.
	 *
	 * Note that the btf_record stashed in map->inner_map_meta->record was
	 * already freed using the map_free callback for map in map case which
	 * eventually calls bpf_map_free_meta, since inner_map_meta is only a
	 * template bpf_map struct used during verification.
	 */
	btf_record_free(rec);
	/* Delay freeing of btf for maps, as map_free callback may need
	 * struct_meta info which will be freed with btf_put().
	 */
	btf_put(btf);
}
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	security_bpf_map_free(map);
	bpf_map_release_memcg(map);
	bpf_map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->usercnt)) {
		if (map->ops->map_release_uref)
			map->ops->map_release_uref(map);
	}
}

static void bpf_map_free_in_work(struct bpf_map *map)
{
	INIT_WORK(&map->work, bpf_map_free_deferred);
	/* Avoid spawning kworkers, since they all might contend
	 * for the same mutex like slab_mutex.
	 */
	queue_work(system_unbound_wq, &map->work);
}

static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
{
	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}

static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_map_free_rcu_gp(rcu);
	else
		call_rcu(rcu, bpf_map_free_rcu_gp);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic64_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map);

		WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
		if (READ_ONCE(map->free_after_mult_rcu_gp))
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		else if (READ_ONCE(map->free_after_rcu_gp))
			call_rcu(&map->rcu, bpf_map_free_rcu_gp);
		else
			bpf_map_free_in_work(map);
	}
}
EXPORT_SYMBOL_GPL(bpf_map_put);

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}
static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
{
	fmode_t mode = f.file->f_mode;

	/* Our file permissions may have been overridden by global
	 * map permissions enforced on the syscall side.
	 */
	if (READ_ONCE(map->frozen))
		mode &= ~FMODE_CAN_WRITE;
	return mode;
}

#ifdef CONFIG_PROC_FS
/* Show the memory usage of a bpf map */
static u64 bpf_map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	struct bpf_map *map = filp->private_data;
	u32 type = 0, jited = 0;

	if (map_type_contains_progs(map)) {
		spin_lock(&map->owner.lock);
		type  = map->owner.type;
		jited = map->owner.jited;
		spin_unlock(&map->owner.lock);
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "map_extra:\t%#llx\n"
		   "memlock:\t%llu\n"
		   "map_id:\t%u\n"
		   "frozen:\t%u\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   (unsigned long long)map->map_extra,
		   bpf_map_memory_usage(map),
		   map->id,
		   READ_ONCE(map->frozen));
	if (type) {
		seq_printf(m, "owner_prog_type:\t%u\n", type);
		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

/* called for any extra memory-mapped regions (except initial) */
static void bpf_map_mmap_open(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
}

/* called for all unmapped memory region (including initial) */
static void bpf_map_mmap_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_dec(map);
}

static const struct vm_operations_struct bpf_map_default_vmops = {
	.open		= bpf_map_mmap_open,
	.close		= bpf_map_mmap_close,
};

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct bpf_map *map = filp->private_data;
	int err;

	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
		return -ENOTSUPP;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	mutex_lock(&map->freeze_mutex);

	if (vma->vm_flags & VM_WRITE) {
		if (map->frozen) {
			err = -EPERM;
			goto out;
		}
		/* map is meant to be read-only, so do not allow mapping as
		 * writable, because it's possible to leak a writable page
		 * reference which would let user-space modify the contents
		 * after freezing, while the verifier assumes they do not
		 * change.
		 */
		if (map->map_flags & BPF_F_RDONLY_PROG) {
			err = -EACCES;
			goto out;
		}
	}

	/* set default open/close callbacks */
	vma->vm_ops = &bpf_map_default_vmops;
	vma->vm_private_data = map;
	vm_flags_clear(vma, VM_MAYEXEC);
	if (!(vma->vm_flags & VM_WRITE))
		/* disallow re-mapping with PROT_WRITE */
		vm_flags_clear(vma, VM_MAYWRITE);

	err = map->ops->map_mmap(map, vma);
	if (err)
		goto out;

	if (vma->vm_flags & VM_MAYWRITE)
		bpf_map_write_active_inc(map);
out:
	mutex_unlock(&map->freeze_mutex);
	return err;
}
static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_poll)
		return map->ops->map_poll(map, filp, pts);

	return EPOLLERR;
}

static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr,
					   unsigned long len, unsigned long pgoff,
					   unsigned long flags)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_get_unmapped_area)
		return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
#ifdef CONFIG_MMU
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
#else
	return addr;
#endif
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
	.mmap		= bpf_map_mmap,
	.poll		= bpf_map_poll,
	.get_unmapped_area = bpf_get_unmapped_area,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must have at least "size" number of bytes.
 * Return strlen on success and < 0 on error.
 */
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
{
	const char *end = src + size;
	const char *orig_src = src;

	memset(dst, 0, size);
	/* Copy all isalnum(), '_' and '.' chars. */
	while (src < end && *src) {
		if (!isalnum(*src) &&
		    *src != '_' && *src != '.')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in "size" number of bytes */
	if (src == end)
		return -EINVAL;

	return src - orig_src;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type)
{
	return -ENOTSUPP;
}
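/*
 * Illustrative sketch (not part of the kernel sources): the CHECK_ATTR()
 * macro above expands against the per-command *_LAST_FIELD define, so for a
 * command such as BPF_MAP_FREEZE (whose last field is map_fd) the check is
 * roughly equivalent to:
 *
 *	memchr_inv((void *)&attr->map_fd + sizeof(attr->map_fd), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_fd) -
 *		   sizeof(attr->map_fd)) != NULL
 *
 * i.e. "true" (reject) if any byte after the command's last used field is
 * non-zero.
 */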
static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
	const struct btf_type *key_type, *value_type;
	u32 key_size, value_size;
	int ret = 0;

	/* Some maps allow key to be unspecified. */
	if (btf_key_id) {
		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
		if (!key_type || key_size != map->key_size)
			return -EINVAL;
	} else {
		key_type = btf_type_by_id(btf, 0);
		if (!map->ops->map_check_btf)
			return -EINVAL;
	}

	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
	if (!value_type || value_size != map->value_size)
		return -EINVAL;

	map->record = btf_parse_fields(btf, value_type,
				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
				       BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE,
				       map->value_size);
	if (!IS_ERR_OR_NULL(map->record)) {
		int i;

		if (!bpf_token_capable(token, CAP_BPF)) {
			ret = -EPERM;
			goto free_map_tab;
		}
		if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) {
			ret = -EACCES;
			goto free_map_tab;
		}
		for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) {
			switch (map->record->field_mask & (1 << i)) {
			case 0:
				continue;
			case BPF_SPIN_LOCK:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_TIMER:
			case BPF_WORKQUEUE:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
			case BPF_KPTR_PERCPU:
			case BPF_REFCOUNT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
				    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_TASK_STORAGE &&
				    map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			case BPF_LIST_HEAD:
			case BPF_RB_ROOT:
				if (map->map_type != BPF_MAP_TYPE_HASH &&
				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
				    map->map_type != BPF_MAP_TYPE_ARRAY) {
					ret = -EOPNOTSUPP;
					goto free_map_tab;
				}
				break;
			default:
				/* Fail if map_type checks are missing for a field type */
				ret = -EOPNOTSUPP;
				goto free_map_tab;
			}
		}
	}

	ret = btf_check_and_fixup_fields(btf, map->record);
	if (ret < 0)
		goto free_map_tab;

	if (map->ops->map_check_btf) {
		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
		if (ret < 0)
			goto free_map_tab;
	}

	return ret;
free_map_tab:
	bpf_map_free_record(map);
	return ret;
}

static bool bpf_net_capable(void)
{
	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
}
#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	const struct bpf_map_ops *ops;
	struct bpf_token *token = NULL;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 map_type = attr->map_type;
	struct bpf_map *map;
	bool token_flag;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
	 * to avoid per-map type checks tripping on unknown flag
	 */
	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
	attr->map_flags &= ~BPF_F_TOKEN_FD;

	if (attr->btf_vmlinux_value_type_id) {
		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
		    attr->btf_key_type_id || attr->btf_value_type_id)
			return -EINVAL;
	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
		return -EINVAL;
	}

	if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
	    attr->map_type != BPF_MAP_TYPE_ARENA &&
	    attr->map_extra != 0)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map_type = attr->map_type;
	if (map_type >= ARRAY_SIZE(bpf_map_types))
		return -EINVAL;
	map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
	ops = bpf_map_types[map_type];
	if (!ops)
		return -EINVAL;

	if (ops->map_alloc_check) {
		err = ops->map_alloc_check(attr);
		if (err)
			return err;
	}
	if (attr->map_ifindex)
		ops = &bpf_map_offload_ops;
	if (!ops->map_mem_usage)
		return -EINVAL;

	if (token_flag) {
		token = bpf_token_get_from_fd(attr->map_token_fd);
		if (IS_ERR(token))
			return PTR_ERR(token);

		/* if current token doesn't grant map creation permissions,
		 * then we can't use this token, so ignore it and rely on
		 * system-wide capabilities checks
		 */
		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
		    !bpf_token_allow_map_type(token, attr->map_type)) {
			bpf_token_put(token);
			token = NULL;
		}
	}
	err = -EPERM;

	/* Intent here is for unprivileged_bpf_disabled to block BPF map
	 * creation for unprivileged users; other actions depend
	 * on fd availability and access to bpffs, so are dependent on
	 * object creation success. Even with unprivileged BPF disabled,
	 * capability checks are still carried out.
	 */
	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
		goto put_token;

	/* check privileged map type permissions */
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		/* unprivileged */
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
	case BPF_MAP_TYPE_BLOOM_FILTER:
	case BPF_MAP_TYPE_LPM_TRIE:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_STRUCT_OPS:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_ARENA:
		if (!bpf_token_capable(token, CAP_BPF))
			goto put_token;
		break;
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_XSKMAP:
		if (!bpf_token_capable(token, CAP_NET_ADMIN))
			goto put_token;
		break;
	default:
		WARN(1, "unsupported map type %d", map_type);
		goto put_token;
	}

	map = ops->map_alloc(attr);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto put_token;
	}
	map->ops = ops;
	map->map_type = map_type;

	err = bpf_obj_name_cpy(map->name, attr->map_name,
			       sizeof(attr->map_name));
	if (err < 0)
		goto free_map;

	atomic64_set(&map->refcnt, 1);
	atomic64_set(&map->usercnt, 1);
	mutex_init(&map->freeze_mutex);
	spin_lock_init(&map->owner.lock);

	if (attr->btf_key_type_id || attr->btf_value_type_id ||
	    /* Even if the map's value is a struct defined in the kernel,
	     * the bpf_prog.o must have BTF to begin with
	     * to figure out the corresponding kernel
	     * counterpart. Thus, attr->btf_fd has
	     * to be valid as well.
	     */
	    attr->btf_vmlinux_value_type_id) {
		struct btf *btf;

		btf = btf_get_by_fd(attr->btf_fd);
		if (IS_ERR(btf)) {
			err = PTR_ERR(btf);
			goto free_map;
		}
		if (btf_is_kernel(btf)) {
			btf_put(btf);
			err = -EACCES;
			goto free_map;
		}
		map->btf = btf;

		if (attr->btf_value_type_id) {
			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
					    attr->btf_value_type_id);
			if (err)
				goto free_map;
		}

		map->btf_key_type_id = attr->btf_key_type_id;
		map->btf_value_type_id = attr->btf_value_type_id;
		map->btf_vmlinux_value_type_id =
			attr->btf_vmlinux_value_type_id;
	}

	err = security_bpf_map_create(map, attr, token);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map_sec;

	bpf_map_save_memcg(map);
	bpf_token_put(token);

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put_with_uref() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put_with_uref(map);
		return err;
	}

	return err;

free_map_sec:
	security_bpf_map_free(map);
free_map:
	bpf_map_free(map);
put_token:
	bpf_token_put(token);
	return err;
}
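/*
 * Illustrative sketch (not part of the kernel sources): from userspace,
 * map_create() is reached through the bpf(2) syscall with BPF_MAP_CREATE.
 * Assuming a raw syscall wrapper (libbpf's bpf_map_create() would normally
 * be used instead), a minimal caller looks roughly like:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	// fd < 0 on error; on success it is the new map's fd
 */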
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc);

void bpf_map_inc_with_uref(struct bpf_map *map)
{
	atomic64_inc(&map->refcnt);
	atomic64_inc(&map->usercnt);
}
EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);

struct bpf_map *bpf_map_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map);
	fdput(f);

	return map;
}
EXPORT_SYMBOL(bpf_map_get);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc_with_uref(map);
	fdput(f);

	return map;
}
/* map_idr_lock should have been held or the map should have been
 * protected by rcu read lock.
 */
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
{
	int refold;

	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
	if (!refold)
		return ERR_PTR(-ENOENT);
	if (uref)
		atomic64_inc(&map->usercnt);

	return map;
}

struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
{
	spin_lock_bh(&map_idr_lock);
	map = __bpf_map_inc_not_zero(map, false);
	spin_unlock_bh(&map_idr_lock);

	return map;
}
EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

static void *__bpf_copy_key(void __user *ukey, u64 key_size)
{
	if (key_size)
		return vmemdup_user(ukey, key_size);

	if (ukey)
		return ERR_PTR(-EINVAL);

	return NULL;
}

static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
	if (key_size)
		return kvmemdup_bpfptr(ukey, key_size);

	if (!bpfptr_is_null(ukey))
		return ERR_PTR(-EINVAL);

	return NULL;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	if (attr->flags & ~BPF_F_LOCK)
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = __bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);

	err = -ENOMEM;
	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) {
		if (copy_from_user(value, uvalue, value_size))
			err = -EFAULT;
		else
			err = bpf_map_copy_value(map, key, value, attr->flags);
		goto free_value;
	}

	err = bpf_map_copy_value(map, key, value, attr->flags);
	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kvfree(value);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	if ((attr->flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		err = -EINVAL;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	value_size = bpf_map_value_size(map);
	value = kvmemdup_bpfptr(uvalue, value_size);
	if (IS_ERR(value)) {
		err = PTR_ERR(value);
		goto free_key;
	}

	err = bpf_map_update_value(map, f.file, key, value, attr->flags);
	if (!err)
		maybe_wait_bpf_programs(map);

	kvfree(value);
free_key:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
{
	bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_write_active_inc(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = ___bpf_copy_key(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
	} else if (IS_FD_PROG_ARRAY(map) ||
		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		/* These maps require sleepable context */
		err = map->ops->map_delete_elem(map, key);
		goto out;
	}

	bpf_disable_instrumentation();
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	bpf_enable_instrumentation();
	if (!err)
		maybe_wait_bpf_programs(map);
out:
	kvfree(key);
err_put:
	bpf_map_write_active_dec(map);
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);
	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = __bpf_copy_key(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kvmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	if (bpf_map_is_offloaded(map)) {
		err = bpf_map_offload_get_next_key(map, key, next_key);
		goto out;
	}

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
out:
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kvfree(next_key);
free_key:
	kvfree(key);
err_put:
	fdput(f);
	return err;
}

int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 cp, max_count;
	int err = 0;
	void *key;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size))
			break;

		if (bpf_map_is_offloaded(map)) {
			err = bpf_map_offload_delete_elem(map, key);
			break;
		}

		bpf_disable_instrumentation();
		rcu_read_lock();
		err = map->ops->map_delete_elem(map, key);
		rcu_read_unlock();
		bpf_enable_instrumentation();
		if (err)
			break;
		cond_resched();
	}
	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(key);

	return err;
}
int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	u32 value_size, cp, max_count;
	void *key, *value;
	int err = 0;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		return -EINVAL;
	}

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!key)
		return -ENOMEM;

	value = kvmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value) {
		kvfree(key);
		return -ENOMEM;
	}

	for (cp = 0; cp < max_count; cp++) {
		err = -EFAULT;
		if (copy_from_user(key, keys + cp * map->key_size,
				   map->key_size) ||
		    copy_from_user(value, values + cp * value_size, value_size))
			break;

		err = bpf_map_update_value(map, map_file, key, value,
					   attr->batch.elem_flags);

		if (err)
			break;
		cond_resched();
	}

	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
		err = -EFAULT;

	kvfree(value);
	kvfree(key);

	return err;
}

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	void __user *values = u64_to_user_ptr(attr->batch.values);
	void __user *keys = u64_to_user_ptr(attr->batch.keys);
	void *buf, *buf_prevkey, *prev_key, *key, *value;
	int err, retry = MAP_LOOKUP_RETRIES;
	u32 value_size, cp, max_count;

	if (attr->batch.elem_flags & ~BPF_F_LOCK)
		return -EINVAL;

	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
		return -EINVAL;

	value_size = bpf_map_value_size(map);

	max_count = attr->batch.count;
	if (!max_count)
		return 0;

	if (put_user(0, &uattr->batch.count))
		return -EFAULT;

	buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
	if (!buf_prevkey)
		return -ENOMEM;

	buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
	if (!buf) {
		kvfree(buf_prevkey);
		return -ENOMEM;
	}

	err = -EFAULT;
	prev_key = NULL;
	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
		goto free_buf;
	key = buf;
	value = key + map->key_size;
	if (ubatch)
		prev_key = buf_prevkey;

	for (cp = 0; cp < max_count;) {
		rcu_read_lock();
		err = map->ops->map_get_next_key(map, prev_key, key);
		rcu_read_unlock();
		if (err)
			break;
		err = bpf_map_copy_value(map, key, value,
					 attr->batch.elem_flags);

		if (err == -ENOENT) {
			if (retry) {
				retry--;
				continue;
			}
			err = -EINTR;
			break;
		}

		if (err)
			goto free_buf;

		if (copy_to_user(keys + cp * map->key_size, key,
				 map->key_size)) {
			err = -EFAULT;
			goto free_buf;
		}
		if (copy_to_user(values + cp * value_size, value, value_size)) {
			err = -EFAULT;
			goto free_buf;
		}

		if (!prev_key)
			prev_key = buf_prevkey;

		swap(prev_key, key);
		retry = MAP_LOOKUP_RETRIES;
		cp++;
		cond_resched();
	}

	if (err == -EFAULT)
		goto free_buf;

	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
		err = -EFAULT;

free_buf:
	kvfree(buf_prevkey);
	kvfree(buf);
	return err;
}
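/*
 * Illustrative sketch (not part of the kernel sources): a userspace consumer
 * of generic_map_lookup_batch() would normally go through libbpf's
 * bpf_map_lookup_batch() helper, passing an opaque in/out batch cursor plus
 * arrays sized for "count" keys and values, roughly:
 *
 *	__u32 count = 64;
 *	void *in = NULL, *out = NULL;	// cursor; NULL means "start over"
 *	err = bpf_map_lookup_batch(map_fd, &in, &out, keys, values, &count,
 *				   NULL);
 *	// count is updated to the number of elements actually copied;
 *	// -ENOENT signals that the whole map has been traversed
 */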
err = -ENOTSUPP; 2070 if (map->map_type == BPF_MAP_TYPE_QUEUE || 2071 map->map_type == BPF_MAP_TYPE_STACK) { 2072 err = map->ops->map_pop_elem(map, value); 2073 } else if (map->map_type == BPF_MAP_TYPE_HASH || 2074 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 2075 map->map_type == BPF_MAP_TYPE_LRU_HASH || 2076 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 2077 if (!bpf_map_is_offloaded(map)) { 2078 bpf_disable_instrumentation(); 2079 rcu_read_lock(); 2080 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); 2081 rcu_read_unlock(); 2082 bpf_enable_instrumentation(); 2083 } 2084 } 2085 2086 if (err) 2087 goto free_value; 2088 2089 if (copy_to_user(uvalue, value, value_size) != 0) { 2090 err = -EFAULT; 2091 goto free_value; 2092 } 2093 2094 err = 0; 2095 2096 free_value: 2097 kvfree(value); 2098 free_key: 2099 kvfree(key); 2100 err_put: 2101 bpf_map_write_active_dec(map); 2102 fdput(f); 2103 return err; 2104 } 2105 2106 #define BPF_MAP_FREEZE_LAST_FIELD map_fd 2107 2108 static int map_freeze(const union bpf_attr *attr) 2109 { 2110 int err = 0, ufd = attr->map_fd; 2111 struct bpf_map *map; 2112 struct fd f; 2113 2114 if (CHECK_ATTR(BPF_MAP_FREEZE)) 2115 return -EINVAL; 2116 2117 f = fdget(ufd); 2118 map = __bpf_map_get(f); 2119 if (IS_ERR(map)) 2120 return PTR_ERR(map); 2121 2122 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) { 2123 fdput(f); 2124 return -ENOTSUPP; 2125 } 2126 2127 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 2128 fdput(f); 2129 return -EPERM; 2130 } 2131 2132 mutex_lock(&map->freeze_mutex); 2133 if (bpf_map_write_active(map)) { 2134 err = -EBUSY; 2135 goto err_put; 2136 } 2137 if (READ_ONCE(map->frozen)) { 2138 err = -EBUSY; 2139 goto err_put; 2140 } 2141 2142 WRITE_ONCE(map->frozen, true); 2143 err_put: 2144 mutex_unlock(&map->freeze_mutex); 2145 fdput(f); 2146 return err; 2147 } 2148 2149 static const struct bpf_prog_ops * const bpf_prog_types[] = { 2150 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 2151 [_id] = & _name ## _prog_ops, 2152 #define BPF_MAP_TYPE(_id, _ops) 2153 #define BPF_LINK_TYPE(_id, _name) 2154 #include <linux/bpf_types.h> 2155 #undef BPF_PROG_TYPE 2156 #undef BPF_MAP_TYPE 2157 #undef BPF_LINK_TYPE 2158 }; 2159 2160 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) 2161 { 2162 const struct bpf_prog_ops *ops; 2163 2164 if (type >= ARRAY_SIZE(bpf_prog_types)) 2165 return -EINVAL; 2166 type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types)); 2167 ops = bpf_prog_types[type]; 2168 if (!ops) 2169 return -EINVAL; 2170 2171 if (!bpf_prog_is_offloaded(prog->aux)) 2172 prog->aux->ops = ops; 2173 else 2174 prog->aux->ops = &bpf_offload_prog_ops; 2175 prog->type = type; 2176 return 0; 2177 } 2178 2179 enum bpf_audit { 2180 BPF_AUDIT_LOAD, 2181 BPF_AUDIT_UNLOAD, 2182 BPF_AUDIT_MAX, 2183 }; 2184 2185 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = { 2186 [BPF_AUDIT_LOAD] = "LOAD", 2187 [BPF_AUDIT_UNLOAD] = "UNLOAD", 2188 }; 2189 2190 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op) 2191 { 2192 struct audit_context *ctx = NULL; 2193 struct audit_buffer *ab; 2194 2195 if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX)) 2196 return; 2197 if (audit_enabled == AUDIT_OFF) 2198 return; 2199 if (!in_irq() && !irqs_disabled()) 2200 ctx = audit_context(); 2201 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 2202 if (unlikely(!ab)) 2203 return; 2204 audit_log_format(ab, "prog-id=%u op=%s", 2205 prog->aux->id, bpf_audit_str[op]); 2206 
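	/*
	 * The record is closed right below; with auditing enabled the
	 * emitted message looks roughly like (illustrative values):
	 *
	 *	type=BPF msg=audit(...): prog-id=42 op=LOAD
	 */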
audit_log_end(ab); 2207 } 2208 2209 static int bpf_prog_alloc_id(struct bpf_prog *prog) 2210 { 2211 int id; 2212 2213 idr_preload(GFP_KERNEL); 2214 spin_lock_bh(&prog_idr_lock); 2215 id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); 2216 if (id > 0) 2217 prog->aux->id = id; 2218 spin_unlock_bh(&prog_idr_lock); 2219 idr_preload_end(); 2220 2221 /* id is in [1, INT_MAX) */ 2222 if (WARN_ON_ONCE(!id)) 2223 return -ENOSPC; 2224 2225 return id > 0 ? 0 : id; 2226 } 2227 2228 void bpf_prog_free_id(struct bpf_prog *prog) 2229 { 2230 unsigned long flags; 2231 2232 /* cBPF to eBPF migrations are currently not in the idr store. 2233 * Offloaded programs are removed from the store when their device 2234 * disappears - even if someone grabs an fd to them they are unusable, 2235 * simply waiting for refcnt to drop to be freed. 2236 */ 2237 if (!prog->aux->id) 2238 return; 2239 2240 spin_lock_irqsave(&prog_idr_lock, flags); 2241 idr_remove(&prog_idr, prog->aux->id); 2242 prog->aux->id = 0; 2243 spin_unlock_irqrestore(&prog_idr_lock, flags); 2244 } 2245 2246 static void __bpf_prog_put_rcu(struct rcu_head *rcu) 2247 { 2248 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); 2249 2250 kvfree(aux->func_info); 2251 kfree(aux->func_info_aux); 2252 free_uid(aux->user); 2253 security_bpf_prog_free(aux->prog); 2254 bpf_prog_free(aux->prog); 2255 } 2256 2257 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) 2258 { 2259 bpf_prog_kallsyms_del_all(prog); 2260 btf_put(prog->aux->btf); 2261 module_put(prog->aux->mod); 2262 kvfree(prog->aux->jited_linfo); 2263 kvfree(prog->aux->linfo); 2264 kfree(prog->aux->kfunc_tab); 2265 if (prog->aux->attach_btf) 2266 btf_put(prog->aux->attach_btf); 2267 2268 if (deferred) { 2269 if (prog->sleepable) 2270 call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); 2271 else 2272 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 2273 } else { 2274 __bpf_prog_put_rcu(&prog->aux->rcu); 2275 } 2276 } 2277 2278 static void bpf_prog_put_deferred(struct work_struct *work) 2279 { 2280 struct bpf_prog_aux *aux; 2281 struct bpf_prog *prog; 2282 2283 aux = container_of(work, struct bpf_prog_aux, work); 2284 prog = aux->prog; 2285 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2286 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2287 bpf_prog_free_id(prog); 2288 __bpf_prog_put_noref(prog, true); 2289 } 2290 2291 static void __bpf_prog_put(struct bpf_prog *prog) 2292 { 2293 struct bpf_prog_aux *aux = prog->aux; 2294 2295 if (atomic64_dec_and_test(&aux->refcnt)) { 2296 if (in_irq() || irqs_disabled()) { 2297 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2298 schedule_work(&aux->work); 2299 } else { 2300 bpf_prog_put_deferred(&aux->work); 2301 } 2302 } 2303 } 2304 2305 void bpf_prog_put(struct bpf_prog *prog) 2306 { 2307 __bpf_prog_put(prog); 2308 } 2309 EXPORT_SYMBOL_GPL(bpf_prog_put); 2310 2311 static int bpf_prog_release(struct inode *inode, struct file *filp) 2312 { 2313 struct bpf_prog *prog = filp->private_data; 2314 2315 bpf_prog_put(prog); 2316 return 0; 2317 } 2318 2319 struct bpf_prog_kstats { 2320 u64 nsecs; 2321 u64 cnt; 2322 u64 misses; 2323 }; 2324 2325 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog) 2326 { 2327 struct bpf_prog_stats *stats; 2328 unsigned int flags; 2329 2330 stats = this_cpu_ptr(prog->stats); 2331 flags = u64_stats_update_begin_irqsave(&stats->syncp); 2332 u64_stats_inc(&stats->misses); 2333 u64_stats_update_end_irqrestore(&stats->syncp, flags); 2334 } 2335 2336 static void 
bpf_prog_get_stats(const struct bpf_prog *prog, 2337 struct bpf_prog_kstats *stats) 2338 { 2339 u64 nsecs = 0, cnt = 0, misses = 0; 2340 int cpu; 2341 2342 for_each_possible_cpu(cpu) { 2343 const struct bpf_prog_stats *st; 2344 unsigned int start; 2345 u64 tnsecs, tcnt, tmisses; 2346 2347 st = per_cpu_ptr(prog->stats, cpu); 2348 do { 2349 start = u64_stats_fetch_begin(&st->syncp); 2350 tnsecs = u64_stats_read(&st->nsecs); 2351 tcnt = u64_stats_read(&st->cnt); 2352 tmisses = u64_stats_read(&st->misses); 2353 } while (u64_stats_fetch_retry(&st->syncp, start)); 2354 nsecs += tnsecs; 2355 cnt += tcnt; 2356 misses += tmisses; 2357 } 2358 stats->nsecs = nsecs; 2359 stats->cnt = cnt; 2360 stats->misses = misses; 2361 } 2362 2363 #ifdef CONFIG_PROC_FS 2364 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) 2365 { 2366 const struct bpf_prog *prog = filp->private_data; 2367 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 2368 struct bpf_prog_kstats stats; 2369 2370 bpf_prog_get_stats(prog, &stats); 2371 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 2372 seq_printf(m, 2373 "prog_type:\t%u\n" 2374 "prog_jited:\t%u\n" 2375 "prog_tag:\t%s\n" 2376 "memlock:\t%llu\n" 2377 "prog_id:\t%u\n" 2378 "run_time_ns:\t%llu\n" 2379 "run_cnt:\t%llu\n" 2380 "recursion_misses:\t%llu\n" 2381 "verified_insns:\t%u\n", 2382 prog->type, 2383 prog->jited, 2384 prog_tag, 2385 prog->pages * 1ULL << PAGE_SHIFT, 2386 prog->aux->id, 2387 stats.nsecs, 2388 stats.cnt, 2389 stats.misses, 2390 prog->aux->verified_insns); 2391 } 2392 #endif 2393 2394 const struct file_operations bpf_prog_fops = { 2395 #ifdef CONFIG_PROC_FS 2396 .show_fdinfo = bpf_prog_show_fdinfo, 2397 #endif 2398 .release = bpf_prog_release, 2399 .read = bpf_dummy_read, 2400 .write = bpf_dummy_write, 2401 }; 2402 2403 int bpf_prog_new_fd(struct bpf_prog *prog) 2404 { 2405 int ret; 2406 2407 ret = security_bpf_prog(prog); 2408 if (ret < 0) 2409 return ret; 2410 2411 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, 2412 O_RDWR | O_CLOEXEC); 2413 } 2414 2415 static struct bpf_prog *____bpf_prog_get(struct fd f) 2416 { 2417 if (!f.file) 2418 return ERR_PTR(-EBADF); 2419 if (f.file->f_op != &bpf_prog_fops) { 2420 fdput(f); 2421 return ERR_PTR(-EINVAL); 2422 } 2423 2424 return f.file->private_data; 2425 } 2426 2427 void bpf_prog_add(struct bpf_prog *prog, int i) 2428 { 2429 atomic64_add(i, &prog->aux->refcnt); 2430 } 2431 EXPORT_SYMBOL_GPL(bpf_prog_add); 2432 2433 void bpf_prog_sub(struct bpf_prog *prog, int i) 2434 { 2435 /* Only to be used for undoing previous bpf_prog_add() in some 2436 * error path. We still know that another entity in our call 2437 * path holds a reference to the program, thus atomic_sub() can 2438 * be safely used in such cases! 
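	 * The WARN_ON() below catches the case where that assumption is
	 * broken and the subtraction would have dropped the last reference.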
2439 */ 2440 WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); 2441 } 2442 EXPORT_SYMBOL_GPL(bpf_prog_sub); 2443 2444 void bpf_prog_inc(struct bpf_prog *prog) 2445 { 2446 atomic64_inc(&prog->aux->refcnt); 2447 } 2448 EXPORT_SYMBOL_GPL(bpf_prog_inc); 2449 2450 /* prog_idr_lock should have been held */ 2451 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) 2452 { 2453 int refold; 2454 2455 refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); 2456 2457 if (!refold) 2458 return ERR_PTR(-ENOENT); 2459 2460 return prog; 2461 } 2462 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 2463 2464 bool bpf_prog_get_ok(struct bpf_prog *prog, 2465 enum bpf_prog_type *attach_type, bool attach_drv) 2466 { 2467 /* not an attachment, just a refcount inc, always allow */ 2468 if (!attach_type) 2469 return true; 2470 2471 if (prog->type != *attach_type) 2472 return false; 2473 if (bpf_prog_is_offloaded(prog->aux) && !attach_drv) 2474 return false; 2475 2476 return true; 2477 } 2478 2479 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 2480 bool attach_drv) 2481 { 2482 struct fd f = fdget(ufd); 2483 struct bpf_prog *prog; 2484 2485 prog = ____bpf_prog_get(f); 2486 if (IS_ERR(prog)) 2487 return prog; 2488 if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 2489 prog = ERR_PTR(-EINVAL); 2490 goto out; 2491 } 2492 2493 bpf_prog_inc(prog); 2494 out: 2495 fdput(f); 2496 return prog; 2497 } 2498 2499 struct bpf_prog *bpf_prog_get(u32 ufd) 2500 { 2501 return __bpf_prog_get(ufd, NULL, false); 2502 } 2503 2504 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 2505 bool attach_drv) 2506 { 2507 return __bpf_prog_get(ufd, &type, attach_drv); 2508 } 2509 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 2510 2511 /* Initially all BPF programs could be loaded w/o specifying 2512 * expected_attach_type. Later for some of them specifying expected_attach_type 2513 * at load time became required so that program could be validated properly. 2514 * Programs of types that are allowed to be loaded both w/ and w/o (for 2515 * backward compatibility) expected_attach_type, should have the default attach 2516 * type assigned to expected_attach_type for the latter case, so that it can be 2517 * validated later at attach time. 2518 * 2519 * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if 2520 * prog type requires it but has some attach types that have to be backward 2521 * compatible. 2522 */ 2523 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) 2524 { 2525 switch (attr->prog_type) { 2526 case BPF_PROG_TYPE_CGROUP_SOCK: 2527 /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't 2528 * exist so checking for non-zero is the way to go here. 
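		 * A zero value is therefore treated as "unspecified" and
		 * defaulted to BPF_CGROUP_INET_SOCK_CREATE below, which keeps
		 * old loaders that never set expected_attach_type working.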
2529 */ 2530 if (!attr->expected_attach_type) 2531 attr->expected_attach_type = 2532 BPF_CGROUP_INET_SOCK_CREATE; 2533 break; 2534 case BPF_PROG_TYPE_SK_REUSEPORT: 2535 if (!attr->expected_attach_type) 2536 attr->expected_attach_type = 2537 BPF_SK_REUSEPORT_SELECT; 2538 break; 2539 } 2540 } 2541 2542 static int 2543 bpf_prog_load_check_attach(enum bpf_prog_type prog_type, 2544 enum bpf_attach_type expected_attach_type, 2545 struct btf *attach_btf, u32 btf_id, 2546 struct bpf_prog *dst_prog) 2547 { 2548 if (btf_id) { 2549 if (btf_id > BTF_MAX_TYPE) 2550 return -EINVAL; 2551 2552 if (!attach_btf && !dst_prog) 2553 return -EINVAL; 2554 2555 switch (prog_type) { 2556 case BPF_PROG_TYPE_TRACING: 2557 case BPF_PROG_TYPE_LSM: 2558 case BPF_PROG_TYPE_STRUCT_OPS: 2559 case BPF_PROG_TYPE_EXT: 2560 break; 2561 default: 2562 return -EINVAL; 2563 } 2564 } 2565 2566 if (attach_btf && (!btf_id || dst_prog)) 2567 return -EINVAL; 2568 2569 if (dst_prog && prog_type != BPF_PROG_TYPE_TRACING && 2570 prog_type != BPF_PROG_TYPE_EXT) 2571 return -EINVAL; 2572 2573 switch (prog_type) { 2574 case BPF_PROG_TYPE_CGROUP_SOCK: 2575 switch (expected_attach_type) { 2576 case BPF_CGROUP_INET_SOCK_CREATE: 2577 case BPF_CGROUP_INET_SOCK_RELEASE: 2578 case BPF_CGROUP_INET4_POST_BIND: 2579 case BPF_CGROUP_INET6_POST_BIND: 2580 return 0; 2581 default: 2582 return -EINVAL; 2583 } 2584 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2585 switch (expected_attach_type) { 2586 case BPF_CGROUP_INET4_BIND: 2587 case BPF_CGROUP_INET6_BIND: 2588 case BPF_CGROUP_INET4_CONNECT: 2589 case BPF_CGROUP_INET6_CONNECT: 2590 case BPF_CGROUP_UNIX_CONNECT: 2591 case BPF_CGROUP_INET4_GETPEERNAME: 2592 case BPF_CGROUP_INET6_GETPEERNAME: 2593 case BPF_CGROUP_UNIX_GETPEERNAME: 2594 case BPF_CGROUP_INET4_GETSOCKNAME: 2595 case BPF_CGROUP_INET6_GETSOCKNAME: 2596 case BPF_CGROUP_UNIX_GETSOCKNAME: 2597 case BPF_CGROUP_UDP4_SENDMSG: 2598 case BPF_CGROUP_UDP6_SENDMSG: 2599 case BPF_CGROUP_UNIX_SENDMSG: 2600 case BPF_CGROUP_UDP4_RECVMSG: 2601 case BPF_CGROUP_UDP6_RECVMSG: 2602 case BPF_CGROUP_UNIX_RECVMSG: 2603 return 0; 2604 default: 2605 return -EINVAL; 2606 } 2607 case BPF_PROG_TYPE_CGROUP_SKB: 2608 switch (expected_attach_type) { 2609 case BPF_CGROUP_INET_INGRESS: 2610 case BPF_CGROUP_INET_EGRESS: 2611 return 0; 2612 default: 2613 return -EINVAL; 2614 } 2615 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2616 switch (expected_attach_type) { 2617 case BPF_CGROUP_SETSOCKOPT: 2618 case BPF_CGROUP_GETSOCKOPT: 2619 return 0; 2620 default: 2621 return -EINVAL; 2622 } 2623 case BPF_PROG_TYPE_SK_LOOKUP: 2624 if (expected_attach_type == BPF_SK_LOOKUP) 2625 return 0; 2626 return -EINVAL; 2627 case BPF_PROG_TYPE_SK_REUSEPORT: 2628 switch (expected_attach_type) { 2629 case BPF_SK_REUSEPORT_SELECT: 2630 case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: 2631 return 0; 2632 default: 2633 return -EINVAL; 2634 } 2635 case BPF_PROG_TYPE_NETFILTER: 2636 if (expected_attach_type == BPF_NETFILTER) 2637 return 0; 2638 return -EINVAL; 2639 case BPF_PROG_TYPE_SYSCALL: 2640 case BPF_PROG_TYPE_EXT: 2641 if (expected_attach_type) 2642 return -EINVAL; 2643 fallthrough; 2644 default: 2645 return 0; 2646 } 2647 } 2648 2649 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type) 2650 { 2651 switch (prog_type) { 2652 case BPF_PROG_TYPE_SCHED_CLS: 2653 case BPF_PROG_TYPE_SCHED_ACT: 2654 case BPF_PROG_TYPE_XDP: 2655 case BPF_PROG_TYPE_LWT_IN: 2656 case BPF_PROG_TYPE_LWT_OUT: 2657 case BPF_PROG_TYPE_LWT_XMIT: 2658 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 2659 case BPF_PROG_TYPE_SK_SKB: 2660 case 
BPF_PROG_TYPE_SK_MSG: 2661 case BPF_PROG_TYPE_FLOW_DISSECTOR: 2662 case BPF_PROG_TYPE_CGROUP_DEVICE: 2663 case BPF_PROG_TYPE_CGROUP_SOCK: 2664 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 2665 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 2666 case BPF_PROG_TYPE_CGROUP_SYSCTL: 2667 case BPF_PROG_TYPE_SOCK_OPS: 2668 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2669 case BPF_PROG_TYPE_NETFILTER: 2670 return true; 2671 case BPF_PROG_TYPE_CGROUP_SKB: 2672 /* always unpriv */ 2673 case BPF_PROG_TYPE_SK_REUSEPORT: 2674 /* equivalent to SOCKET_FILTER. need CAP_BPF only */ 2675 default: 2676 return false; 2677 } 2678 } 2679 2680 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type) 2681 { 2682 switch (prog_type) { 2683 case BPF_PROG_TYPE_KPROBE: 2684 case BPF_PROG_TYPE_TRACEPOINT: 2685 case BPF_PROG_TYPE_PERF_EVENT: 2686 case BPF_PROG_TYPE_RAW_TRACEPOINT: 2687 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 2688 case BPF_PROG_TYPE_TRACING: 2689 case BPF_PROG_TYPE_LSM: 2690 case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */ 2691 case BPF_PROG_TYPE_EXT: /* extends any prog */ 2692 return true; 2693 default: 2694 return false; 2695 } 2696 } 2697 2698 /* last field in 'union bpf_attr' used by this command */ 2699 #define BPF_PROG_LOAD_LAST_FIELD prog_token_fd 2700 2701 static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) 2702 { 2703 enum bpf_prog_type type = attr->prog_type; 2704 struct bpf_prog *prog, *dst_prog = NULL; 2705 struct btf *attach_btf = NULL; 2706 struct bpf_token *token = NULL; 2707 bool bpf_cap; 2708 int err; 2709 char license[128]; 2710 2711 if (CHECK_ATTR(BPF_PROG_LOAD)) 2712 return -EINVAL; 2713 2714 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 2715 BPF_F_ANY_ALIGNMENT | 2716 BPF_F_TEST_STATE_FREQ | 2717 BPF_F_SLEEPABLE | 2718 BPF_F_TEST_RND_HI32 | 2719 BPF_F_XDP_HAS_FRAGS | 2720 BPF_F_XDP_DEV_BOUND_ONLY | 2721 BPF_F_TEST_REG_INVARIANTS | 2722 BPF_F_TOKEN_FD)) 2723 return -EINVAL; 2724 2725 bpf_prog_load_fixup_attach_type(attr); 2726 2727 if (attr->prog_flags & BPF_F_TOKEN_FD) { 2728 token = bpf_token_get_from_fd(attr->prog_token_fd); 2729 if (IS_ERR(token)) 2730 return PTR_ERR(token); 2731 /* if current token doesn't grant prog loading permissions, 2732 * then we can't use this token, so ignore it and rely on 2733 * system-wide capabilities checks 2734 */ 2735 if (!bpf_token_allow_cmd(token, BPF_PROG_LOAD) || 2736 !bpf_token_allow_prog_type(token, attr->prog_type, 2737 attr->expected_attach_type)) { 2738 bpf_token_put(token); 2739 token = NULL; 2740 } 2741 } 2742 2743 bpf_cap = bpf_token_capable(token, CAP_BPF); 2744 err = -EPERM; 2745 2746 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 2747 (attr->prog_flags & BPF_F_ANY_ALIGNMENT) && 2748 !bpf_cap) 2749 goto put_token; 2750 2751 /* Intent here is for unprivileged_bpf_disabled to block BPF program 2752 * creation for unprivileged users; other actions depend 2753 * on fd availability and access to bpffs, so are dependent on 2754 * object creation success. Even with unprivileged BPF disabled, 2755 * capability checks are still carried out for these 2756 * and other operations. 2757 */ 2758 if (sysctl_unprivileged_bpf_disabled && !bpf_cap) 2759 goto put_token; 2760 2761 if (attr->insn_cnt == 0 || 2762 attr->insn_cnt > (bpf_cap ? 
BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) { 2763 err = -E2BIG; 2764 goto put_token; 2765 } 2766 if (type != BPF_PROG_TYPE_SOCKET_FILTER && 2767 type != BPF_PROG_TYPE_CGROUP_SKB && 2768 !bpf_cap) 2769 goto put_token; 2770 2771 if (is_net_admin_prog_type(type) && !bpf_token_capable(token, CAP_NET_ADMIN)) 2772 goto put_token; 2773 if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON)) 2774 goto put_token; 2775 2776 /* attach_prog_fd/attach_btf_obj_fd can specify fd of either bpf_prog 2777 * or btf, we need to check which one it is 2778 */ 2779 if (attr->attach_prog_fd) { 2780 dst_prog = bpf_prog_get(attr->attach_prog_fd); 2781 if (IS_ERR(dst_prog)) { 2782 dst_prog = NULL; 2783 attach_btf = btf_get_by_fd(attr->attach_btf_obj_fd); 2784 if (IS_ERR(attach_btf)) { 2785 err = -EINVAL; 2786 goto put_token; 2787 } 2788 if (!btf_is_kernel(attach_btf)) { 2789 /* attaching through specifying bpf_prog's BTF 2790 * objects directly might be supported eventually 2791 */ 2792 btf_put(attach_btf); 2793 err = -ENOTSUPP; 2794 goto put_token; 2795 } 2796 } 2797 } else if (attr->attach_btf_id) { 2798 /* fall back to vmlinux BTF, if BTF type ID is specified */ 2799 attach_btf = bpf_get_btf_vmlinux(); 2800 if (IS_ERR(attach_btf)) { 2801 err = PTR_ERR(attach_btf); 2802 goto put_token; 2803 } 2804 if (!attach_btf) { 2805 err = -EINVAL; 2806 goto put_token; 2807 } 2808 btf_get(attach_btf); 2809 } 2810 2811 if (bpf_prog_load_check_attach(type, attr->expected_attach_type, 2812 attach_btf, attr->attach_btf_id, 2813 dst_prog)) { 2814 if (dst_prog) 2815 bpf_prog_put(dst_prog); 2816 if (attach_btf) 2817 btf_put(attach_btf); 2818 err = -EINVAL; 2819 goto put_token; 2820 } 2821 2822 /* plain bpf_prog allocation */ 2823 prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); 2824 if (!prog) { 2825 if (dst_prog) 2826 bpf_prog_put(dst_prog); 2827 if (attach_btf) 2828 btf_put(attach_btf); 2829 err = -EINVAL; 2830 goto put_token; 2831 } 2832 2833 prog->expected_attach_type = attr->expected_attach_type; 2834 prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); 2835 prog->aux->attach_btf = attach_btf; 2836 prog->aux->attach_btf_id = attr->attach_btf_id; 2837 prog->aux->dst_prog = dst_prog; 2838 prog->aux->dev_bound = !!attr->prog_ifindex; 2839 prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; 2840 2841 /* move token into prog->aux, reuse taken refcnt */ 2842 prog->aux->token = token; 2843 token = NULL; 2844 2845 prog->aux->user = get_current_user(); 2846 prog->len = attr->insn_cnt; 2847 2848 err = -EFAULT; 2849 if (copy_from_bpfptr(prog->insns, 2850 make_bpfptr(attr->insns, uattr.is_kernel), 2851 bpf_prog_insn_size(prog)) != 0) 2852 goto free_prog; 2853 /* copy eBPF program license from user space */ 2854 if (strncpy_from_bpfptr(license, 2855 make_bpfptr(attr->license, uattr.is_kernel), 2856 sizeof(license) - 1) < 0) 2857 goto free_prog; 2858 license[sizeof(license) - 1] = 0; 2859 2860 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 2861 prog->gpl_compatible = license_is_gpl_compatible(license) ? 
1 : 0; 2862 2863 prog->orig_prog = NULL; 2864 prog->jited = 0; 2865 2866 atomic64_set(&prog->aux->refcnt, 1); 2867 2868 if (bpf_prog_is_dev_bound(prog->aux)) { 2869 err = bpf_prog_dev_bound_init(prog, attr); 2870 if (err) 2871 goto free_prog; 2872 } 2873 2874 if (type == BPF_PROG_TYPE_EXT && dst_prog && 2875 bpf_prog_is_dev_bound(dst_prog->aux)) { 2876 err = bpf_prog_dev_bound_inherit(prog, dst_prog); 2877 if (err) 2878 goto free_prog; 2879 } 2880 2881 /* 2882 * Bookkeeping for managing the program attachment chain. 2883 * 2884 * It might be tempting to set attach_tracing_prog flag at the attachment 2885 * time, but this will not prevent from loading bunch of tracing prog 2886 * first, then attach them one to another. 2887 * 2888 * The flag attach_tracing_prog is set for the whole program lifecycle, and 2889 * doesn't have to be cleared in bpf_tracing_link_release, since tracing 2890 * programs cannot change attachment target. 2891 */ 2892 if (type == BPF_PROG_TYPE_TRACING && dst_prog && 2893 dst_prog->type == BPF_PROG_TYPE_TRACING) { 2894 prog->aux->attach_tracing_prog = true; 2895 } 2896 2897 /* find program type: socket_filter vs tracing_filter */ 2898 err = find_prog_type(type, prog); 2899 if (err < 0) 2900 goto free_prog; 2901 2902 prog->aux->load_time = ktime_get_boottime_ns(); 2903 err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2904 sizeof(attr->prog_name)); 2905 if (err < 0) 2906 goto free_prog; 2907 2908 err = security_bpf_prog_load(prog, attr, token); 2909 if (err) 2910 goto free_prog_sec; 2911 2912 /* run eBPF verifier */ 2913 err = bpf_check(&prog, attr, uattr, uattr_size); 2914 if (err < 0) 2915 goto free_used_maps; 2916 2917 prog = bpf_prog_select_runtime(prog, &err); 2918 if (err < 0) 2919 goto free_used_maps; 2920 2921 err = bpf_prog_alloc_id(prog); 2922 if (err) 2923 goto free_used_maps; 2924 2925 /* Upon success of bpf_prog_alloc_id(), the BPF prog is 2926 * effectively publicly exposed. However, retrieving via 2927 * bpf_prog_get_fd_by_id() will take another reference, 2928 * therefore it cannot be gone underneath us. 2929 * 2930 * Only for the time /after/ successful bpf_prog_new_fd() 2931 * and before returning to userspace, we might just hold 2932 * one reference and any parallel close on that fd could 2933 * rip everything out. Hence, below notifications must 2934 * happen before bpf_prog_new_fd(). 2935 * 2936 * Also, any failure handling from this point onwards must 2937 * be using bpf_prog_put() given the program is exposed. 2938 */ 2939 bpf_prog_kallsyms_add(prog); 2940 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 2941 bpf_audit_prog(prog, BPF_AUDIT_LOAD); 2942 2943 err = bpf_prog_new_fd(prog); 2944 if (err < 0) 2945 bpf_prog_put(prog); 2946 return err; 2947 2948 free_used_maps: 2949 /* In case we have subprogs, we need to wait for a grace 2950 * period before we can tear down JIT memory since symbols 2951 * are already exposed under kallsyms. 
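	 * __bpf_prog_put_noref() is therefore called with deferred set
	 * whenever prog->aux->real_func_cnt is non-zero, so the teardown
	 * goes through an RCU (or RCU tasks trace, for sleepable programs)
	 * grace period instead of freeing immediately.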
2952 */ 2953 __bpf_prog_put_noref(prog, prog->aux->real_func_cnt); 2954 return err; 2955 2956 free_prog_sec: 2957 security_bpf_prog_free(prog); 2958 free_prog: 2959 free_uid(prog->aux->user); 2960 if (prog->aux->attach_btf) 2961 btf_put(prog->aux->attach_btf); 2962 bpf_prog_free(prog); 2963 put_token: 2964 bpf_token_put(token); 2965 return err; 2966 } 2967 2968 #define BPF_OBJ_LAST_FIELD path_fd 2969 2970 static int bpf_obj_pin(const union bpf_attr *attr) 2971 { 2972 int path_fd; 2973 2974 if (CHECK_ATTR(BPF_OBJ) || attr->file_flags & ~BPF_F_PATH_FD) 2975 return -EINVAL; 2976 2977 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2978 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2979 return -EINVAL; 2980 2981 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 2982 return bpf_obj_pin_user(attr->bpf_fd, path_fd, 2983 u64_to_user_ptr(attr->pathname)); 2984 } 2985 2986 static int bpf_obj_get(const union bpf_attr *attr) 2987 { 2988 int path_fd; 2989 2990 if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || 2991 attr->file_flags & ~(BPF_OBJ_FLAG_MASK | BPF_F_PATH_FD)) 2992 return -EINVAL; 2993 2994 /* path_fd has to be accompanied by BPF_F_PATH_FD flag */ 2995 if (!(attr->file_flags & BPF_F_PATH_FD) && attr->path_fd) 2996 return -EINVAL; 2997 2998 path_fd = attr->file_flags & BPF_F_PATH_FD ? attr->path_fd : AT_FDCWD; 2999 return bpf_obj_get_user(path_fd, u64_to_user_ptr(attr->pathname), 3000 attr->file_flags); 3001 } 3002 3003 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 3004 const struct bpf_link_ops *ops, struct bpf_prog *prog) 3005 { 3006 WARN_ON(ops->dealloc && ops->dealloc_deferred); 3007 atomic64_set(&link->refcnt, 1); 3008 link->type = type; 3009 link->id = 0; 3010 link->ops = ops; 3011 link->prog = prog; 3012 } 3013 3014 static void bpf_link_free_id(int id) 3015 { 3016 if (!id) 3017 return; 3018 3019 spin_lock_bh(&link_idr_lock); 3020 idr_remove(&link_idr, id); 3021 spin_unlock_bh(&link_idr_lock); 3022 } 3023 3024 /* Clean up bpf_link and corresponding anon_inode file and FD. After 3025 * anon_inode is created, bpf_link can't be just kfree()'d due to deferred 3026 * anon_inode's release() call. This helper marks bpf_link as 3027 * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt 3028 * is not decremented, it's the responsibility of a calling code that failed 3029 * to complete bpf_link initialization. 3030 * This helper eventually calls link's dealloc callback, but does not call 3031 * link's release callback. 
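 * Typical usage is bpf_link_prime(), then the actual attach operation; on
 * failure the caller runs bpf_link_cleanup(), on success bpf_link_settle().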
3032 */ 3033 void bpf_link_cleanup(struct bpf_link_primer *primer) 3034 { 3035 primer->link->prog = NULL; 3036 bpf_link_free_id(primer->id); 3037 fput(primer->file); 3038 put_unused_fd(primer->fd); 3039 } 3040 3041 void bpf_link_inc(struct bpf_link *link) 3042 { 3043 atomic64_inc(&link->refcnt); 3044 } 3045 3046 static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu) 3047 { 3048 struct bpf_link *link = container_of(rcu, struct bpf_link, rcu); 3049 3050 /* free bpf_link and its containing memory */ 3051 link->ops->dealloc_deferred(link); 3052 } 3053 3054 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) 3055 { 3056 if (rcu_trace_implies_rcu_gp()) 3057 bpf_link_defer_dealloc_rcu_gp(rcu); 3058 else 3059 call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp); 3060 } 3061 3062 /* bpf_link_free is guaranteed to be called from process context */ 3063 static void bpf_link_free(struct bpf_link *link) 3064 { 3065 const struct bpf_link_ops *ops = link->ops; 3066 bool sleepable = false; 3067 3068 bpf_link_free_id(link->id); 3069 if (link->prog) { 3070 sleepable = link->prog->sleepable; 3071 /* detach BPF program, clean up used resources */ 3072 ops->release(link); 3073 bpf_prog_put(link->prog); 3074 } 3075 if (ops->dealloc_deferred) { 3076 /* schedule BPF link deallocation; if underlying BPF program 3077 * is sleepable, we need to first wait for RCU tasks trace 3078 * sync, then go through "classic" RCU grace period 3079 */ 3080 if (sleepable) 3081 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); 3082 else 3083 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 3084 } else if (ops->dealloc) 3085 ops->dealloc(link); 3086 } 3087 3088 static void bpf_link_put_deferred(struct work_struct *work) 3089 { 3090 struct bpf_link *link = container_of(work, struct bpf_link, work); 3091 3092 bpf_link_free(link); 3093 } 3094 3095 /* bpf_link_put might be called from atomic context. It needs to be called 3096 * from sleepable context in order to acquire sleeping locks during the process. 
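 * Freeing is therefore always deferred to the system workqueue here; callers
 * known to run in process context can use bpf_link_put_direct() below, which
 * frees synchronously once the last reference is dropped.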
3097 */ 3098 void bpf_link_put(struct bpf_link *link) 3099 { 3100 if (!atomic64_dec_and_test(&link->refcnt)) 3101 return; 3102 3103 INIT_WORK(&link->work, bpf_link_put_deferred); 3104 schedule_work(&link->work); 3105 } 3106 EXPORT_SYMBOL(bpf_link_put); 3107 3108 static void bpf_link_put_direct(struct bpf_link *link) 3109 { 3110 if (!atomic64_dec_and_test(&link->refcnt)) 3111 return; 3112 bpf_link_free(link); 3113 } 3114 3115 static int bpf_link_release(struct inode *inode, struct file *filp) 3116 { 3117 struct bpf_link *link = filp->private_data; 3118 3119 bpf_link_put_direct(link); 3120 return 0; 3121 } 3122 3123 #ifdef CONFIG_PROC_FS 3124 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) 3125 #define BPF_MAP_TYPE(_id, _ops) 3126 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name, 3127 static const char *bpf_link_type_strs[] = { 3128 [BPF_LINK_TYPE_UNSPEC] = "<invalid>", 3129 #include <linux/bpf_types.h> 3130 }; 3131 #undef BPF_PROG_TYPE 3132 #undef BPF_MAP_TYPE 3133 #undef BPF_LINK_TYPE 3134 3135 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) 3136 { 3137 const struct bpf_link *link = filp->private_data; 3138 const struct bpf_prog *prog = link->prog; 3139 char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; 3140 3141 seq_printf(m, 3142 "link_type:\t%s\n" 3143 "link_id:\t%u\n", 3144 bpf_link_type_strs[link->type], 3145 link->id); 3146 if (prog) { 3147 bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); 3148 seq_printf(m, 3149 "prog_tag:\t%s\n" 3150 "prog_id:\t%u\n", 3151 prog_tag, 3152 prog->aux->id); 3153 } 3154 if (link->ops->show_fdinfo) 3155 link->ops->show_fdinfo(link, m); 3156 } 3157 #endif 3158 3159 static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts) 3160 { 3161 struct bpf_link *link = file->private_data; 3162 3163 return link->ops->poll(file, pts); 3164 } 3165 3166 static const struct file_operations bpf_link_fops = { 3167 #ifdef CONFIG_PROC_FS 3168 .show_fdinfo = bpf_link_show_fdinfo, 3169 #endif 3170 .release = bpf_link_release, 3171 .read = bpf_dummy_read, 3172 .write = bpf_dummy_write, 3173 }; 3174 3175 static const struct file_operations bpf_link_fops_poll = { 3176 #ifdef CONFIG_PROC_FS 3177 .show_fdinfo = bpf_link_show_fdinfo, 3178 #endif 3179 .release = bpf_link_release, 3180 .read = bpf_dummy_read, 3181 .write = bpf_dummy_write, 3182 .poll = bpf_link_poll, 3183 }; 3184 3185 static int bpf_link_alloc_id(struct bpf_link *link) 3186 { 3187 int id; 3188 3189 idr_preload(GFP_KERNEL); 3190 spin_lock_bh(&link_idr_lock); 3191 id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC); 3192 spin_unlock_bh(&link_idr_lock); 3193 idr_preload_end(); 3194 3195 return id; 3196 } 3197 3198 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file, 3199 * reserving unused FD and allocating ID from link_idr. This is to be paired 3200 * with bpf_link_settle() to install FD and ID and expose bpf_link to 3201 * user-space, if bpf_link is successfully attached. If not, bpf_link and 3202 * pre-allocated resources are to be freed with bpf_cleanup() call. All the 3203 * transient state is passed around in struct bpf_link_primer. 3204 * This is preferred way to create and initialize bpf_link, especially when 3205 * there are complicated and expensive operations in between creating bpf_link 3206 * itself and attaching it to BPF hook. 
By using bpf_link_prime() and 3207 * bpf_link_settle() kernel code using bpf_link doesn't have to perform 3208 * expensive (and potentially failing) roll back operations in a rare case 3209 * that file, FD, or ID can't be allocated. 3210 */ 3211 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 3212 { 3213 struct file *file; 3214 int fd, id; 3215 3216 fd = get_unused_fd_flags(O_CLOEXEC); 3217 if (fd < 0) 3218 return fd; 3219 3220 3221 id = bpf_link_alloc_id(link); 3222 if (id < 0) { 3223 put_unused_fd(fd); 3224 return id; 3225 } 3226 3227 file = anon_inode_getfile("bpf_link", 3228 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, 3229 link, O_CLOEXEC); 3230 if (IS_ERR(file)) { 3231 bpf_link_free_id(id); 3232 put_unused_fd(fd); 3233 return PTR_ERR(file); 3234 } 3235 3236 primer->link = link; 3237 primer->file = file; 3238 primer->fd = fd; 3239 primer->id = id; 3240 return 0; 3241 } 3242 3243 int bpf_link_settle(struct bpf_link_primer *primer) 3244 { 3245 /* make bpf_link fetchable by ID */ 3246 spin_lock_bh(&link_idr_lock); 3247 primer->link->id = primer->id; 3248 spin_unlock_bh(&link_idr_lock); 3249 /* make bpf_link fetchable by FD */ 3250 fd_install(primer->fd, primer->file); 3251 /* pass through installed FD */ 3252 return primer->fd; 3253 } 3254 3255 int bpf_link_new_fd(struct bpf_link *link) 3256 { 3257 return anon_inode_getfd("bpf-link", 3258 link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, 3259 link, O_CLOEXEC); 3260 } 3261 3262 struct bpf_link *bpf_link_get_from_fd(u32 ufd) 3263 { 3264 struct fd f = fdget(ufd); 3265 struct bpf_link *link; 3266 3267 if (!f.file) 3268 return ERR_PTR(-EBADF); 3269 if (f.file->f_op != &bpf_link_fops && f.file->f_op != &bpf_link_fops_poll) { 3270 fdput(f); 3271 return ERR_PTR(-EINVAL); 3272 } 3273 3274 link = f.file->private_data; 3275 bpf_link_inc(link); 3276 fdput(f); 3277 3278 return link; 3279 } 3280 EXPORT_SYMBOL(bpf_link_get_from_fd); 3281 3282 static void bpf_tracing_link_release(struct bpf_link *link) 3283 { 3284 struct bpf_tracing_link *tr_link = 3285 container_of(link, struct bpf_tracing_link, link.link); 3286 3287 WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link, 3288 tr_link->trampoline)); 3289 3290 bpf_trampoline_put(tr_link->trampoline); 3291 3292 /* tgt_prog is NULL if target is a kernel function */ 3293 if (tr_link->tgt_prog) 3294 bpf_prog_put(tr_link->tgt_prog); 3295 } 3296 3297 static void bpf_tracing_link_dealloc(struct bpf_link *link) 3298 { 3299 struct bpf_tracing_link *tr_link = 3300 container_of(link, struct bpf_tracing_link, link.link); 3301 3302 kfree(tr_link); 3303 } 3304 3305 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link, 3306 struct seq_file *seq) 3307 { 3308 struct bpf_tracing_link *tr_link = 3309 container_of(link, struct bpf_tracing_link, link.link); 3310 u32 target_btf_id, target_obj_id; 3311 3312 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3313 &target_obj_id, &target_btf_id); 3314 seq_printf(seq, 3315 "attach_type:\t%d\n" 3316 "target_obj_id:\t%u\n" 3317 "target_btf_id:\t%u\n", 3318 tr_link->attach_type, 3319 target_obj_id, 3320 target_btf_id); 3321 } 3322 3323 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link, 3324 struct bpf_link_info *info) 3325 { 3326 struct bpf_tracing_link *tr_link = 3327 container_of(link, struct bpf_tracing_link, link.link); 3328 3329 info->tracing.attach_type = tr_link->attach_type; 3330 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3331 &info->tracing.target_obj_id, 3332 
&info->tracing.target_btf_id); 3333 3334 return 0; 3335 } 3336 3337 static const struct bpf_link_ops bpf_tracing_link_lops = { 3338 .release = bpf_tracing_link_release, 3339 .dealloc = bpf_tracing_link_dealloc, 3340 .show_fdinfo = bpf_tracing_link_show_fdinfo, 3341 .fill_link_info = bpf_tracing_link_fill_link_info, 3342 }; 3343 3344 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 3345 int tgt_prog_fd, 3346 u32 btf_id, 3347 u64 bpf_cookie) 3348 { 3349 struct bpf_link_primer link_primer; 3350 struct bpf_prog *tgt_prog = NULL; 3351 struct bpf_trampoline *tr = NULL; 3352 struct bpf_tracing_link *link; 3353 u64 key = 0; 3354 int err; 3355 3356 switch (prog->type) { 3357 case BPF_PROG_TYPE_TRACING: 3358 if (prog->expected_attach_type != BPF_TRACE_FENTRY && 3359 prog->expected_attach_type != BPF_TRACE_FEXIT && 3360 prog->expected_attach_type != BPF_MODIFY_RETURN) { 3361 err = -EINVAL; 3362 goto out_put_prog; 3363 } 3364 break; 3365 case BPF_PROG_TYPE_EXT: 3366 if (prog->expected_attach_type != 0) { 3367 err = -EINVAL; 3368 goto out_put_prog; 3369 } 3370 break; 3371 case BPF_PROG_TYPE_LSM: 3372 if (prog->expected_attach_type != BPF_LSM_MAC) { 3373 err = -EINVAL; 3374 goto out_put_prog; 3375 } 3376 break; 3377 default: 3378 err = -EINVAL; 3379 goto out_put_prog; 3380 } 3381 3382 if (!!tgt_prog_fd != !!btf_id) { 3383 err = -EINVAL; 3384 goto out_put_prog; 3385 } 3386 3387 if (tgt_prog_fd) { 3388 /* 3389 * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this 3390 * part would be changed to implement the same for 3391 * BPF_PROG_TYPE_TRACING, do not forget to update the way how 3392 * attach_tracing_prog flag is set. 3393 */ 3394 if (prog->type != BPF_PROG_TYPE_EXT) { 3395 err = -EINVAL; 3396 goto out_put_prog; 3397 } 3398 3399 tgt_prog = bpf_prog_get(tgt_prog_fd); 3400 if (IS_ERR(tgt_prog)) { 3401 err = PTR_ERR(tgt_prog); 3402 tgt_prog = NULL; 3403 goto out_put_prog; 3404 } 3405 3406 key = bpf_trampoline_compute_key(tgt_prog, NULL, btf_id); 3407 } 3408 3409 link = kzalloc(sizeof(*link), GFP_USER); 3410 if (!link) { 3411 err = -ENOMEM; 3412 goto out_put_prog; 3413 } 3414 bpf_link_init(&link->link.link, BPF_LINK_TYPE_TRACING, 3415 &bpf_tracing_link_lops, prog); 3416 link->attach_type = prog->expected_attach_type; 3417 link->link.cookie = bpf_cookie; 3418 3419 mutex_lock(&prog->aux->dst_mutex); 3420 3421 /* There are a few possible cases here: 3422 * 3423 * - if prog->aux->dst_trampoline is set, the program was just loaded 3424 * and not yet attached to anything, so we can use the values stored 3425 * in prog->aux 3426 * 3427 * - if prog->aux->dst_trampoline is NULL, the program has already been 3428 * attached to a target and its initial target was cleared (below) 3429 * 3430 * - if tgt_prog != NULL, the caller specified tgt_prog_fd + 3431 * target_btf_id using the link_create API. 3432 * 3433 * - if tgt_prog == NULL when this function was called using the old 3434 * raw_tracepoint_open API, and we need a target from prog->aux 3435 * 3436 * - if prog->aux->dst_trampoline and tgt_prog is NULL, the program 3437 * was detached and is going for re-attachment. 3438 * 3439 * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf 3440 * are NULL, then program was already attached and user did not provide 3441 * tgt_prog_fd so we have no way to find out or create trampoline 3442 */ 3443 if (!prog->aux->dst_trampoline && !tgt_prog) { 3444 /* 3445 * Allow re-attach for TRACING and LSM programs. If it's 3446 * currently linked, bpf_trampoline_link_prog will fail. 
3447 * EXT programs need to specify tgt_prog_fd, so they 3448 * re-attach in separate code path. 3449 */ 3450 if (prog->type != BPF_PROG_TYPE_TRACING && 3451 prog->type != BPF_PROG_TYPE_LSM) { 3452 err = -EINVAL; 3453 goto out_unlock; 3454 } 3455 /* We can allow re-attach only if we have valid attach_btf. */ 3456 if (!prog->aux->attach_btf) { 3457 err = -EINVAL; 3458 goto out_unlock; 3459 } 3460 btf_id = prog->aux->attach_btf_id; 3461 key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id); 3462 } 3463 3464 if (!prog->aux->dst_trampoline || 3465 (key && key != prog->aux->dst_trampoline->key)) { 3466 /* If there is no saved target, or the specified target is 3467 * different from the destination specified at load time, we 3468 * need a new trampoline and a check for compatibility 3469 */ 3470 struct bpf_attach_target_info tgt_info = {}; 3471 3472 err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id, 3473 &tgt_info); 3474 if (err) 3475 goto out_unlock; 3476 3477 if (tgt_info.tgt_mod) { 3478 module_put(prog->aux->mod); 3479 prog->aux->mod = tgt_info.tgt_mod; 3480 } 3481 3482 tr = bpf_trampoline_get(key, &tgt_info); 3483 if (!tr) { 3484 err = -ENOMEM; 3485 goto out_unlock; 3486 } 3487 } else { 3488 /* The caller didn't specify a target, or the target was the 3489 * same as the destination supplied during program load. This 3490 * means we can reuse the trampoline and reference from program 3491 * load time, and there is no need to allocate a new one. This 3492 * can only happen once for any program, as the saved values in 3493 * prog->aux are cleared below. 3494 */ 3495 tr = prog->aux->dst_trampoline; 3496 tgt_prog = prog->aux->dst_prog; 3497 } 3498 3499 err = bpf_link_prime(&link->link.link, &link_primer); 3500 if (err) 3501 goto out_unlock; 3502 3503 err = bpf_trampoline_link_prog(&link->link, tr); 3504 if (err) { 3505 bpf_link_cleanup(&link_primer); 3506 link = NULL; 3507 goto out_unlock; 3508 } 3509 3510 link->tgt_prog = tgt_prog; 3511 link->trampoline = tr; 3512 3513 /* Always clear the trampoline and target prog from prog->aux to make 3514 * sure the original attach destination is not kept alive after a 3515 * program is (re-)attached to another target. 
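	 * Whatever is no longer needed - the extra prog reference taken via
	 * tgt_prog_fd, or the trampoline saved at load time when a new one
	 * was allocated - is dropped right below before the pointers are
	 * cleared.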
3516 */ 3517 if (prog->aux->dst_prog && 3518 (tgt_prog_fd || tr != prog->aux->dst_trampoline)) 3519 /* got extra prog ref from syscall, or attaching to different prog */ 3520 bpf_prog_put(prog->aux->dst_prog); 3521 if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline) 3522 /* we allocated a new trampoline, so free the old one */ 3523 bpf_trampoline_put(prog->aux->dst_trampoline); 3524 3525 prog->aux->dst_prog = NULL; 3526 prog->aux->dst_trampoline = NULL; 3527 mutex_unlock(&prog->aux->dst_mutex); 3528 3529 return bpf_link_settle(&link_primer); 3530 out_unlock: 3531 if (tr && tr != prog->aux->dst_trampoline) 3532 bpf_trampoline_put(tr); 3533 mutex_unlock(&prog->aux->dst_mutex); 3534 kfree(link); 3535 out_put_prog: 3536 if (tgt_prog_fd && tgt_prog) 3537 bpf_prog_put(tgt_prog); 3538 return err; 3539 } 3540 3541 static void bpf_raw_tp_link_release(struct bpf_link *link) 3542 { 3543 struct bpf_raw_tp_link *raw_tp = 3544 container_of(link, struct bpf_raw_tp_link, link); 3545 3546 bpf_probe_unregister(raw_tp->btp, raw_tp); 3547 bpf_put_raw_tracepoint(raw_tp->btp); 3548 } 3549 3550 static void bpf_raw_tp_link_dealloc(struct bpf_link *link) 3551 { 3552 struct bpf_raw_tp_link *raw_tp = 3553 container_of(link, struct bpf_raw_tp_link, link); 3554 3555 kfree(raw_tp); 3556 } 3557 3558 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link, 3559 struct seq_file *seq) 3560 { 3561 struct bpf_raw_tp_link *raw_tp_link = 3562 container_of(link, struct bpf_raw_tp_link, link); 3563 3564 seq_printf(seq, 3565 "tp_name:\t%s\n", 3566 raw_tp_link->btp->tp->name); 3567 } 3568 3569 static int bpf_copy_to_user(char __user *ubuf, const char *buf, u32 ulen, 3570 u32 len) 3571 { 3572 if (ulen >= len + 1) { 3573 if (copy_to_user(ubuf, buf, len + 1)) 3574 return -EFAULT; 3575 } else { 3576 char zero = '\0'; 3577 3578 if (copy_to_user(ubuf, buf, ulen - 1)) 3579 return -EFAULT; 3580 if (put_user(zero, ubuf + ulen - 1)) 3581 return -EFAULT; 3582 return -ENOSPC; 3583 } 3584 3585 return 0; 3586 } 3587 3588 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, 3589 struct bpf_link_info *info) 3590 { 3591 struct bpf_raw_tp_link *raw_tp_link = 3592 container_of(link, struct bpf_raw_tp_link, link); 3593 char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name); 3594 const char *tp_name = raw_tp_link->btp->tp->name; 3595 u32 ulen = info->raw_tracepoint.tp_name_len; 3596 size_t tp_len = strlen(tp_name); 3597 3598 if (!ulen ^ !ubuf) 3599 return -EINVAL; 3600 3601 info->raw_tracepoint.tp_name_len = tp_len + 1; 3602 3603 if (!ubuf) 3604 return 0; 3605 3606 return bpf_copy_to_user(ubuf, tp_name, ulen, tp_len); 3607 } 3608 3609 static const struct bpf_link_ops bpf_raw_tp_link_lops = { 3610 .release = bpf_raw_tp_link_release, 3611 .dealloc_deferred = bpf_raw_tp_link_dealloc, 3612 .show_fdinfo = bpf_raw_tp_link_show_fdinfo, 3613 .fill_link_info = bpf_raw_tp_link_fill_link_info, 3614 }; 3615 3616 #ifdef CONFIG_PERF_EVENTS 3617 struct bpf_perf_link { 3618 struct bpf_link link; 3619 struct file *perf_file; 3620 }; 3621 3622 static void bpf_perf_link_release(struct bpf_link *link) 3623 { 3624 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3625 struct perf_event *event = perf_link->perf_file->private_data; 3626 3627 perf_event_free_bpf_prog(event); 3628 fput(perf_link->perf_file); 3629 } 3630 3631 static void bpf_perf_link_dealloc(struct bpf_link *link) 3632 { 3633 struct bpf_perf_link *perf_link = container_of(link, struct bpf_perf_link, link); 3634 3635 
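	/* The perf_file reference was already dropped in
	 * bpf_perf_link_release(); only the link itself is freed here.
	 */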
kfree(perf_link); 3636 } 3637 3638 static int bpf_perf_link_fill_common(const struct perf_event *event, 3639 char __user *uname, u32 ulen, 3640 u64 *probe_offset, u64 *probe_addr, 3641 u32 *fd_type, unsigned long *missed) 3642 { 3643 const char *buf; 3644 u32 prog_id; 3645 size_t len; 3646 int err; 3647 3648 if (!ulen ^ !uname) 3649 return -EINVAL; 3650 3651 err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf, 3652 probe_offset, probe_addr, missed); 3653 if (err) 3654 return err; 3655 if (!uname) 3656 return 0; 3657 if (buf) { 3658 len = strlen(buf); 3659 err = bpf_copy_to_user(uname, buf, ulen, len); 3660 if (err) 3661 return err; 3662 } else { 3663 char zero = '\0'; 3664 3665 if (put_user(zero, uname)) 3666 return -EFAULT; 3667 } 3668 return 0; 3669 } 3670 3671 #ifdef CONFIG_KPROBE_EVENTS 3672 static int bpf_perf_link_fill_kprobe(const struct perf_event *event, 3673 struct bpf_link_info *info) 3674 { 3675 unsigned long missed; 3676 char __user *uname; 3677 u64 addr, offset; 3678 u32 ulen, type; 3679 int err; 3680 3681 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3682 ulen = info->perf_event.kprobe.name_len; 3683 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr, 3684 &type, &missed); 3685 if (err) 3686 return err; 3687 if (type == BPF_FD_TYPE_KRETPROBE) 3688 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3689 else 3690 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3691 3692 info->perf_event.kprobe.offset = offset; 3693 info->perf_event.kprobe.missed = missed; 3694 if (!kallsyms_show_value(current_cred())) 3695 addr = 0; 3696 info->perf_event.kprobe.addr = addr; 3697 info->perf_event.kprobe.cookie = event->bpf_cookie; 3698 return 0; 3699 } 3700 #endif 3701 3702 #ifdef CONFIG_UPROBE_EVENTS 3703 static int bpf_perf_link_fill_uprobe(const struct perf_event *event, 3704 struct bpf_link_info *info) 3705 { 3706 char __user *uname; 3707 u64 addr, offset; 3708 u32 ulen, type; 3709 int err; 3710 3711 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3712 ulen = info->perf_event.uprobe.name_len; 3713 err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr, 3714 &type, NULL); 3715 if (err) 3716 return err; 3717 3718 if (type == BPF_FD_TYPE_URETPROBE) 3719 info->perf_event.type = BPF_PERF_EVENT_URETPROBE; 3720 else 3721 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3722 info->perf_event.uprobe.offset = offset; 3723 info->perf_event.uprobe.cookie = event->bpf_cookie; 3724 return 0; 3725 } 3726 #endif 3727 3728 static int bpf_perf_link_fill_probe(const struct perf_event *event, 3729 struct bpf_link_info *info) 3730 { 3731 #ifdef CONFIG_KPROBE_EVENTS 3732 if (event->tp_event->flags & TRACE_EVENT_FL_KPROBE) 3733 return bpf_perf_link_fill_kprobe(event, info); 3734 #endif 3735 #ifdef CONFIG_UPROBE_EVENTS 3736 if (event->tp_event->flags & TRACE_EVENT_FL_UPROBE) 3737 return bpf_perf_link_fill_uprobe(event, info); 3738 #endif 3739 return -EOPNOTSUPP; 3740 } 3741 3742 static int bpf_perf_link_fill_tracepoint(const struct perf_event *event, 3743 struct bpf_link_info *info) 3744 { 3745 char __user *uname; 3746 u32 ulen; 3747 3748 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3749 ulen = info->perf_event.tracepoint.name_len; 3750 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3751 info->perf_event.tracepoint.cookie = event->bpf_cookie; 3752 return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL); 3753 } 3754 3755 static int bpf_perf_link_fill_perf_event(const struct perf_event *event, 3756 struct 
bpf_link_info *info) 3757 { 3758 info->perf_event.event.type = event->attr.type; 3759 info->perf_event.event.config = event->attr.config; 3760 info->perf_event.event.cookie = event->bpf_cookie; 3761 info->perf_event.type = BPF_PERF_EVENT_EVENT; 3762 return 0; 3763 } 3764 3765 static int bpf_perf_link_fill_link_info(const struct bpf_link *link, 3766 struct bpf_link_info *info) 3767 { 3768 struct bpf_perf_link *perf_link; 3769 const struct perf_event *event; 3770 3771 perf_link = container_of(link, struct bpf_perf_link, link); 3772 event = perf_get_event(perf_link->perf_file); 3773 if (IS_ERR(event)) 3774 return PTR_ERR(event); 3775 3776 switch (event->prog->type) { 3777 case BPF_PROG_TYPE_PERF_EVENT: 3778 return bpf_perf_link_fill_perf_event(event, info); 3779 case BPF_PROG_TYPE_TRACEPOINT: 3780 return bpf_perf_link_fill_tracepoint(event, info); 3781 case BPF_PROG_TYPE_KPROBE: 3782 return bpf_perf_link_fill_probe(event, info); 3783 default: 3784 return -EOPNOTSUPP; 3785 } 3786 } 3787 3788 static const struct bpf_link_ops bpf_perf_link_lops = { 3789 .release = bpf_perf_link_release, 3790 .dealloc = bpf_perf_link_dealloc, 3791 .fill_link_info = bpf_perf_link_fill_link_info, 3792 }; 3793 3794 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3795 { 3796 struct bpf_link_primer link_primer; 3797 struct bpf_perf_link *link; 3798 struct perf_event *event; 3799 struct file *perf_file; 3800 int err; 3801 3802 if (attr->link_create.flags) 3803 return -EINVAL; 3804 3805 perf_file = perf_event_get(attr->link_create.target_fd); 3806 if (IS_ERR(perf_file)) 3807 return PTR_ERR(perf_file); 3808 3809 link = kzalloc(sizeof(*link), GFP_USER); 3810 if (!link) { 3811 err = -ENOMEM; 3812 goto out_put_file; 3813 } 3814 bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 3815 link->perf_file = perf_file; 3816 3817 err = bpf_link_prime(&link->link, &link_primer); 3818 if (err) { 3819 kfree(link); 3820 goto out_put_file; 3821 } 3822 3823 event = perf_file->private_data; 3824 err = perf_event_set_bpf_prog(event, prog, attr->link_create.perf_event.bpf_cookie); 3825 if (err) { 3826 bpf_link_cleanup(&link_primer); 3827 goto out_put_file; 3828 } 3829 /* perf_event_set_bpf_prog() doesn't take its own refcnt on prog */ 3830 bpf_prog_inc(prog); 3831 3832 return bpf_link_settle(&link_primer); 3833 3834 out_put_file: 3835 fput(perf_file); 3836 return err; 3837 } 3838 #else 3839 static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 3840 { 3841 return -EOPNOTSUPP; 3842 } 3843 #endif /* CONFIG_PERF_EVENTS */ 3844 3845 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 3846 const char __user *user_tp_name, u64 cookie) 3847 { 3848 struct bpf_link_primer link_primer; 3849 struct bpf_raw_tp_link *link; 3850 struct bpf_raw_event_map *btp; 3851 const char *tp_name; 3852 char buf[128]; 3853 int err; 3854 3855 switch (prog->type) { 3856 case BPF_PROG_TYPE_TRACING: 3857 case BPF_PROG_TYPE_EXT: 3858 case BPF_PROG_TYPE_LSM: 3859 if (user_tp_name) 3860 /* The attach point for this category of programs 3861 * should be specified via btf_id during program load. 
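			 * A user-supplied name is rejected here; for
			 * BPF_TRACE_RAW_TP the tracepoint name is recovered
			 * from prog->aux->attach_func_name below, and all
			 * other expected attach types are routed through
			 * bpf_tracing_prog_attach() instead.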
3862 */ 3863 return -EINVAL; 3864 if (prog->type == BPF_PROG_TYPE_TRACING && 3865 prog->expected_attach_type == BPF_TRACE_RAW_TP) { 3866 tp_name = prog->aux->attach_func_name; 3867 break; 3868 } 3869 return bpf_tracing_prog_attach(prog, 0, 0, 0); 3870 case BPF_PROG_TYPE_RAW_TRACEPOINT: 3871 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 3872 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) 3873 return -EFAULT; 3874 buf[sizeof(buf) - 1] = 0; 3875 tp_name = buf; 3876 break; 3877 default: 3878 return -EINVAL; 3879 } 3880 3881 btp = bpf_get_raw_tracepoint(tp_name); 3882 if (!btp) 3883 return -ENOENT; 3884 3885 link = kzalloc(sizeof(*link), GFP_USER); 3886 if (!link) { 3887 err = -ENOMEM; 3888 goto out_put_btp; 3889 } 3890 bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 3891 &bpf_raw_tp_link_lops, prog); 3892 link->btp = btp; 3893 link->cookie = cookie; 3894 3895 err = bpf_link_prime(&link->link, &link_primer); 3896 if (err) { 3897 kfree(link); 3898 goto out_put_btp; 3899 } 3900 3901 err = bpf_probe_register(link->btp, link); 3902 if (err) { 3903 bpf_link_cleanup(&link_primer); 3904 goto out_put_btp; 3905 } 3906 3907 return bpf_link_settle(&link_primer); 3908 3909 out_put_btp: 3910 bpf_put_raw_tracepoint(btp); 3911 return err; 3912 } 3913 3914 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie 3915 3916 static int bpf_raw_tracepoint_open(const union bpf_attr *attr) 3917 { 3918 struct bpf_prog *prog; 3919 void __user *tp_name; 3920 __u64 cookie; 3921 int fd; 3922 3923 if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) 3924 return -EINVAL; 3925 3926 prog = bpf_prog_get(attr->raw_tracepoint.prog_fd); 3927 if (IS_ERR(prog)) 3928 return PTR_ERR(prog); 3929 3930 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); 3931 cookie = attr->raw_tracepoint.cookie; 3932 fd = bpf_raw_tp_link_attach(prog, tp_name, cookie); 3933 if (fd < 0) 3934 bpf_prog_put(prog); 3935 return fd; 3936 } 3937 3938 static enum bpf_prog_type 3939 attach_type_to_prog_type(enum bpf_attach_type attach_type) 3940 { 3941 switch (attach_type) { 3942 case BPF_CGROUP_INET_INGRESS: 3943 case BPF_CGROUP_INET_EGRESS: 3944 return BPF_PROG_TYPE_CGROUP_SKB; 3945 case BPF_CGROUP_INET_SOCK_CREATE: 3946 case BPF_CGROUP_INET_SOCK_RELEASE: 3947 case BPF_CGROUP_INET4_POST_BIND: 3948 case BPF_CGROUP_INET6_POST_BIND: 3949 return BPF_PROG_TYPE_CGROUP_SOCK; 3950 case BPF_CGROUP_INET4_BIND: 3951 case BPF_CGROUP_INET6_BIND: 3952 case BPF_CGROUP_INET4_CONNECT: 3953 case BPF_CGROUP_INET6_CONNECT: 3954 case BPF_CGROUP_UNIX_CONNECT: 3955 case BPF_CGROUP_INET4_GETPEERNAME: 3956 case BPF_CGROUP_INET6_GETPEERNAME: 3957 case BPF_CGROUP_UNIX_GETPEERNAME: 3958 case BPF_CGROUP_INET4_GETSOCKNAME: 3959 case BPF_CGROUP_INET6_GETSOCKNAME: 3960 case BPF_CGROUP_UNIX_GETSOCKNAME: 3961 case BPF_CGROUP_UDP4_SENDMSG: 3962 case BPF_CGROUP_UDP6_SENDMSG: 3963 case BPF_CGROUP_UNIX_SENDMSG: 3964 case BPF_CGROUP_UDP4_RECVMSG: 3965 case BPF_CGROUP_UDP6_RECVMSG: 3966 case BPF_CGROUP_UNIX_RECVMSG: 3967 return BPF_PROG_TYPE_CGROUP_SOCK_ADDR; 3968 case BPF_CGROUP_SOCK_OPS: 3969 return BPF_PROG_TYPE_SOCK_OPS; 3970 case BPF_CGROUP_DEVICE: 3971 return BPF_PROG_TYPE_CGROUP_DEVICE; 3972 case BPF_SK_MSG_VERDICT: 3973 return BPF_PROG_TYPE_SK_MSG; 3974 case BPF_SK_SKB_STREAM_PARSER: 3975 case BPF_SK_SKB_STREAM_VERDICT: 3976 case BPF_SK_SKB_VERDICT: 3977 return BPF_PROG_TYPE_SK_SKB; 3978 case BPF_LIRC_MODE2: 3979 return BPF_PROG_TYPE_LIRC_MODE2; 3980 case BPF_FLOW_DISSECTOR: 3981 return BPF_PROG_TYPE_FLOW_DISSECTOR; 3982 case BPF_CGROUP_SYSCTL: 3983 return 
BPF_PROG_TYPE_CGROUP_SYSCTL; 3984 case BPF_CGROUP_GETSOCKOPT: 3985 case BPF_CGROUP_SETSOCKOPT: 3986 return BPF_PROG_TYPE_CGROUP_SOCKOPT; 3987 case BPF_TRACE_ITER: 3988 case BPF_TRACE_RAW_TP: 3989 case BPF_TRACE_FENTRY: 3990 case BPF_TRACE_FEXIT: 3991 case BPF_MODIFY_RETURN: 3992 return BPF_PROG_TYPE_TRACING; 3993 case BPF_LSM_MAC: 3994 return BPF_PROG_TYPE_LSM; 3995 case BPF_SK_LOOKUP: 3996 return BPF_PROG_TYPE_SK_LOOKUP; 3997 case BPF_XDP: 3998 return BPF_PROG_TYPE_XDP; 3999 case BPF_LSM_CGROUP: 4000 return BPF_PROG_TYPE_LSM; 4001 case BPF_TCX_INGRESS: 4002 case BPF_TCX_EGRESS: 4003 case BPF_NETKIT_PRIMARY: 4004 case BPF_NETKIT_PEER: 4005 return BPF_PROG_TYPE_SCHED_CLS; 4006 default: 4007 return BPF_PROG_TYPE_UNSPEC; 4008 } 4009 } 4010 4011 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, 4012 enum bpf_attach_type attach_type) 4013 { 4014 enum bpf_prog_type ptype; 4015 4016 switch (prog->type) { 4017 case BPF_PROG_TYPE_CGROUP_SOCK: 4018 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4019 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4020 case BPF_PROG_TYPE_SK_LOOKUP: 4021 return attach_type == prog->expected_attach_type ? 0 : -EINVAL; 4022 case BPF_PROG_TYPE_CGROUP_SKB: 4023 if (!bpf_token_capable(prog->aux->token, CAP_NET_ADMIN)) 4024 /* cg-skb progs can be loaded by unpriv user. 4025 * check permissions at attach time. 4026 */ 4027 return -EPERM; 4028 4029 ptype = attach_type_to_prog_type(attach_type); 4030 if (prog->type != ptype) 4031 return -EINVAL; 4032 4033 return prog->enforce_expected_attach_type && 4034 prog->expected_attach_type != attach_type ? 4035 -EINVAL : 0; 4036 case BPF_PROG_TYPE_EXT: 4037 return 0; 4038 case BPF_PROG_TYPE_NETFILTER: 4039 if (attach_type != BPF_NETFILTER) 4040 return -EINVAL; 4041 return 0; 4042 case BPF_PROG_TYPE_PERF_EVENT: 4043 case BPF_PROG_TYPE_TRACEPOINT: 4044 if (attach_type != BPF_PERF_EVENT) 4045 return -EINVAL; 4046 return 0; 4047 case BPF_PROG_TYPE_KPROBE: 4048 if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && 4049 attach_type != BPF_TRACE_KPROBE_MULTI) 4050 return -EINVAL; 4051 if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION && 4052 attach_type != BPF_TRACE_KPROBE_SESSION) 4053 return -EINVAL; 4054 if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && 4055 attach_type != BPF_TRACE_UPROBE_MULTI) 4056 return -EINVAL; 4057 if (attach_type != BPF_PERF_EVENT && 4058 attach_type != BPF_TRACE_KPROBE_MULTI && 4059 attach_type != BPF_TRACE_KPROBE_SESSION && 4060 attach_type != BPF_TRACE_UPROBE_MULTI) 4061 return -EINVAL; 4062 return 0; 4063 case BPF_PROG_TYPE_SCHED_CLS: 4064 if (attach_type != BPF_TCX_INGRESS && 4065 attach_type != BPF_TCX_EGRESS && 4066 attach_type != BPF_NETKIT_PRIMARY && 4067 attach_type != BPF_NETKIT_PEER) 4068 return -EINVAL; 4069 return 0; 4070 default: 4071 ptype = attach_type_to_prog_type(attach_type); 4072 if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) 4073 return -EINVAL; 4074 return 0; 4075 } 4076 } 4077 4078 #define BPF_PROG_ATTACH_LAST_FIELD expected_revision 4079 4080 #define BPF_F_ATTACH_MASK_BASE \ 4081 (BPF_F_ALLOW_OVERRIDE | \ 4082 BPF_F_ALLOW_MULTI | \ 4083 BPF_F_REPLACE) 4084 4085 #define BPF_F_ATTACH_MASK_MPROG \ 4086 (BPF_F_REPLACE | \ 4087 BPF_F_BEFORE | \ 4088 BPF_F_AFTER | \ 4089 BPF_F_ID | \ 4090 BPF_F_LINK) 4091 4092 static int bpf_prog_attach(const union bpf_attr *attr) 4093 { 4094 enum bpf_prog_type ptype; 4095 struct bpf_prog *prog; 4096 int ret; 4097 4098 if (CHECK_ATTR(BPF_PROG_ATTACH)) 4099 return -EINVAL; 4100 4101 ptype = 
attach_type_to_prog_type(attr->attach_type); 4102 if (ptype == BPF_PROG_TYPE_UNSPEC) 4103 return -EINVAL; 4104 if (bpf_mprog_supported(ptype)) { 4105 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4106 return -EINVAL; 4107 } else { 4108 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE) 4109 return -EINVAL; 4110 if (attr->relative_fd || 4111 attr->expected_revision) 4112 return -EINVAL; 4113 } 4114 4115 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4116 if (IS_ERR(prog)) 4117 return PTR_ERR(prog); 4118 4119 if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) { 4120 bpf_prog_put(prog); 4121 return -EINVAL; 4122 } 4123 4124 switch (ptype) { 4125 case BPF_PROG_TYPE_SK_SKB: 4126 case BPF_PROG_TYPE_SK_MSG: 4127 ret = sock_map_get_from_fd(attr, prog); 4128 break; 4129 case BPF_PROG_TYPE_LIRC_MODE2: 4130 ret = lirc_prog_attach(attr, prog); 4131 break; 4132 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4133 ret = netns_bpf_prog_attach(attr, prog); 4134 break; 4135 case BPF_PROG_TYPE_CGROUP_DEVICE: 4136 case BPF_PROG_TYPE_CGROUP_SKB: 4137 case BPF_PROG_TYPE_CGROUP_SOCK: 4138 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4139 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4140 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4141 case BPF_PROG_TYPE_SOCK_OPS: 4142 case BPF_PROG_TYPE_LSM: 4143 if (ptype == BPF_PROG_TYPE_LSM && 4144 prog->expected_attach_type != BPF_LSM_CGROUP) 4145 ret = -EINVAL; 4146 else 4147 ret = cgroup_bpf_prog_attach(attr, ptype, prog); 4148 break; 4149 case BPF_PROG_TYPE_SCHED_CLS: 4150 if (attr->attach_type == BPF_TCX_INGRESS || 4151 attr->attach_type == BPF_TCX_EGRESS) 4152 ret = tcx_prog_attach(attr, prog); 4153 else 4154 ret = netkit_prog_attach(attr, prog); 4155 break; 4156 default: 4157 ret = -EINVAL; 4158 } 4159 4160 if (ret) 4161 bpf_prog_put(prog); 4162 return ret; 4163 } 4164 4165 #define BPF_PROG_DETACH_LAST_FIELD expected_revision 4166 4167 static int bpf_prog_detach(const union bpf_attr *attr) 4168 { 4169 struct bpf_prog *prog = NULL; 4170 enum bpf_prog_type ptype; 4171 int ret; 4172 4173 if (CHECK_ATTR(BPF_PROG_DETACH)) 4174 return -EINVAL; 4175 4176 ptype = attach_type_to_prog_type(attr->attach_type); 4177 if (bpf_mprog_supported(ptype)) { 4178 if (ptype == BPF_PROG_TYPE_UNSPEC) 4179 return -EINVAL; 4180 if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG) 4181 return -EINVAL; 4182 if (attr->attach_bpf_fd) { 4183 prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); 4184 if (IS_ERR(prog)) 4185 return PTR_ERR(prog); 4186 } 4187 } else if (attr->attach_flags || 4188 attr->relative_fd || 4189 attr->expected_revision) { 4190 return -EINVAL; 4191 } 4192 4193 switch (ptype) { 4194 case BPF_PROG_TYPE_SK_MSG: 4195 case BPF_PROG_TYPE_SK_SKB: 4196 ret = sock_map_prog_detach(attr, ptype); 4197 break; 4198 case BPF_PROG_TYPE_LIRC_MODE2: 4199 ret = lirc_prog_detach(attr); 4200 break; 4201 case BPF_PROG_TYPE_FLOW_DISSECTOR: 4202 ret = netns_bpf_prog_detach(attr, ptype); 4203 break; 4204 case BPF_PROG_TYPE_CGROUP_DEVICE: 4205 case BPF_PROG_TYPE_CGROUP_SKB: 4206 case BPF_PROG_TYPE_CGROUP_SOCK: 4207 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 4208 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 4209 case BPF_PROG_TYPE_CGROUP_SYSCTL: 4210 case BPF_PROG_TYPE_SOCK_OPS: 4211 case BPF_PROG_TYPE_LSM: 4212 ret = cgroup_bpf_prog_detach(attr, ptype); 4213 break; 4214 case BPF_PROG_TYPE_SCHED_CLS: 4215 if (attr->attach_type == BPF_TCX_INGRESS || 4216 attr->attach_type == BPF_TCX_EGRESS) 4217 ret = tcx_prog_detach(attr, prog); 4218 else 4219 ret = netkit_prog_detach(attr, prog); 4220 break; 4221 default: 4222 ret = -EINVAL; 4223 } 4224 
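/* drop the reference taken above for the mprog-aware detach path, if any */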
4225 if (prog) 4226 bpf_prog_put(prog); 4227 return ret; 4228 } 4229 4230 #define BPF_PROG_QUERY_LAST_FIELD query.revision 4231 4232 static int bpf_prog_query(const union bpf_attr *attr, 4233 union bpf_attr __user *uattr) 4234 { 4235 if (!bpf_net_capable()) 4236 return -EPERM; 4237 if (CHECK_ATTR(BPF_PROG_QUERY)) 4238 return -EINVAL; 4239 if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE) 4240 return -EINVAL; 4241 4242 switch (attr->query.attach_type) { 4243 case BPF_CGROUP_INET_INGRESS: 4244 case BPF_CGROUP_INET_EGRESS: 4245 case BPF_CGROUP_INET_SOCK_CREATE: 4246 case BPF_CGROUP_INET_SOCK_RELEASE: 4247 case BPF_CGROUP_INET4_BIND: 4248 case BPF_CGROUP_INET6_BIND: 4249 case BPF_CGROUP_INET4_POST_BIND: 4250 case BPF_CGROUP_INET6_POST_BIND: 4251 case BPF_CGROUP_INET4_CONNECT: 4252 case BPF_CGROUP_INET6_CONNECT: 4253 case BPF_CGROUP_UNIX_CONNECT: 4254 case BPF_CGROUP_INET4_GETPEERNAME: 4255 case BPF_CGROUP_INET6_GETPEERNAME: 4256 case BPF_CGROUP_UNIX_GETPEERNAME: 4257 case BPF_CGROUP_INET4_GETSOCKNAME: 4258 case BPF_CGROUP_INET6_GETSOCKNAME: 4259 case BPF_CGROUP_UNIX_GETSOCKNAME: 4260 case BPF_CGROUP_UDP4_SENDMSG: 4261 case BPF_CGROUP_UDP6_SENDMSG: 4262 case BPF_CGROUP_UNIX_SENDMSG: 4263 case BPF_CGROUP_UDP4_RECVMSG: 4264 case BPF_CGROUP_UDP6_RECVMSG: 4265 case BPF_CGROUP_UNIX_RECVMSG: 4266 case BPF_CGROUP_SOCK_OPS: 4267 case BPF_CGROUP_DEVICE: 4268 case BPF_CGROUP_SYSCTL: 4269 case BPF_CGROUP_GETSOCKOPT: 4270 case BPF_CGROUP_SETSOCKOPT: 4271 case BPF_LSM_CGROUP: 4272 return cgroup_bpf_prog_query(attr, uattr); 4273 case BPF_LIRC_MODE2: 4274 return lirc_prog_query(attr, uattr); 4275 case BPF_FLOW_DISSECTOR: 4276 case BPF_SK_LOOKUP: 4277 return netns_bpf_prog_query(attr, uattr); 4278 case BPF_SK_SKB_STREAM_PARSER: 4279 case BPF_SK_SKB_STREAM_VERDICT: 4280 case BPF_SK_MSG_VERDICT: 4281 case BPF_SK_SKB_VERDICT: 4282 return sock_map_bpf_prog_query(attr, uattr); 4283 case BPF_TCX_INGRESS: 4284 case BPF_TCX_EGRESS: 4285 return tcx_prog_query(attr, uattr); 4286 case BPF_NETKIT_PRIMARY: 4287 case BPF_NETKIT_PEER: 4288 return netkit_prog_query(attr, uattr); 4289 default: 4290 return -EINVAL; 4291 } 4292 } 4293 4294 #define BPF_PROG_TEST_RUN_LAST_FIELD test.batch_size 4295 4296 static int bpf_prog_test_run(const union bpf_attr *attr, 4297 union bpf_attr __user *uattr) 4298 { 4299 struct bpf_prog *prog; 4300 int ret = -ENOTSUPP; 4301 4302 if (CHECK_ATTR(BPF_PROG_TEST_RUN)) 4303 return -EINVAL; 4304 4305 if ((attr->test.ctx_size_in && !attr->test.ctx_in) || 4306 (!attr->test.ctx_size_in && attr->test.ctx_in)) 4307 return -EINVAL; 4308 4309 if ((attr->test.ctx_size_out && !attr->test.ctx_out) || 4310 (!attr->test.ctx_size_out && attr->test.ctx_out)) 4311 return -EINVAL; 4312 4313 prog = bpf_prog_get(attr->test.prog_fd); 4314 if (IS_ERR(prog)) 4315 return PTR_ERR(prog); 4316 4317 if (prog->aux->ops->test_run) 4318 ret = prog->aux->ops->test_run(prog, attr, uattr); 4319 4320 bpf_prog_put(prog); 4321 return ret; 4322 } 4323 4324 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id 4325 4326 static int bpf_obj_get_next_id(const union bpf_attr *attr, 4327 union bpf_attr __user *uattr, 4328 struct idr *idr, 4329 spinlock_t *lock) 4330 { 4331 u32 next_id = attr->start_id; 4332 int err = 0; 4333 4334 if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) 4335 return -EINVAL; 4336 4337 if (!capable(CAP_SYS_ADMIN)) 4338 return -EPERM; 4339 4340 next_id++; 4341 spin_lock_bh(lock); 4342 if (!idr_get_next(idr, &next_id)) 4343 err = -ENOENT; 4344 spin_unlock_bh(lock); 4345 4346 if (!err) 4347 err = put_user(next_id, 
&uattr->next_id); 4348 4349 return err; 4350 } 4351 4352 struct bpf_map *bpf_map_get_curr_or_next(u32 *id) 4353 { 4354 struct bpf_map *map; 4355 4356 spin_lock_bh(&map_idr_lock); 4357 again: 4358 map = idr_get_next(&map_idr, id); 4359 if (map) { 4360 map = __bpf_map_inc_not_zero(map, false); 4361 if (IS_ERR(map)) { 4362 (*id)++; 4363 goto again; 4364 } 4365 } 4366 spin_unlock_bh(&map_idr_lock); 4367 4368 return map; 4369 } 4370 4371 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) 4372 { 4373 struct bpf_prog *prog; 4374 4375 spin_lock_bh(&prog_idr_lock); 4376 again: 4377 prog = idr_get_next(&prog_idr, id); 4378 if (prog) { 4379 prog = bpf_prog_inc_not_zero(prog); 4380 if (IS_ERR(prog)) { 4381 (*id)++; 4382 goto again; 4383 } 4384 } 4385 spin_unlock_bh(&prog_idr_lock); 4386 4387 return prog; 4388 } 4389 4390 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id 4391 4392 struct bpf_prog *bpf_prog_by_id(u32 id) 4393 { 4394 struct bpf_prog *prog; 4395 4396 if (!id) 4397 return ERR_PTR(-ENOENT); 4398 4399 spin_lock_bh(&prog_idr_lock); 4400 prog = idr_find(&prog_idr, id); 4401 if (prog) 4402 prog = bpf_prog_inc_not_zero(prog); 4403 else 4404 prog = ERR_PTR(-ENOENT); 4405 spin_unlock_bh(&prog_idr_lock); 4406 return prog; 4407 } 4408 4409 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) 4410 { 4411 struct bpf_prog *prog; 4412 u32 id = attr->prog_id; 4413 int fd; 4414 4415 if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) 4416 return -EINVAL; 4417 4418 if (!capable(CAP_SYS_ADMIN)) 4419 return -EPERM; 4420 4421 prog = bpf_prog_by_id(id); 4422 if (IS_ERR(prog)) 4423 return PTR_ERR(prog); 4424 4425 fd = bpf_prog_new_fd(prog); 4426 if (fd < 0) 4427 bpf_prog_put(prog); 4428 4429 return fd; 4430 } 4431 4432 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags 4433 4434 static int bpf_map_get_fd_by_id(const union bpf_attr *attr) 4435 { 4436 struct bpf_map *map; 4437 u32 id = attr->map_id; 4438 int f_flags; 4439 int fd; 4440 4441 if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || 4442 attr->open_flags & ~BPF_OBJ_FLAG_MASK) 4443 return -EINVAL; 4444 4445 if (!capable(CAP_SYS_ADMIN)) 4446 return -EPERM; 4447 4448 f_flags = bpf_get_file_flag(attr->open_flags); 4449 if (f_flags < 0) 4450 return f_flags; 4451 4452 spin_lock_bh(&map_idr_lock); 4453 map = idr_find(&map_idr, id); 4454 if (map) 4455 map = __bpf_map_inc_not_zero(map, true); 4456 else 4457 map = ERR_PTR(-ENOENT); 4458 spin_unlock_bh(&map_idr_lock); 4459 4460 if (IS_ERR(map)) 4461 return PTR_ERR(map); 4462 4463 fd = bpf_map_new_fd(map, f_flags); 4464 if (fd < 0) 4465 bpf_map_put_with_uref(map); 4466 4467 return fd; 4468 } 4469 4470 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, 4471 unsigned long addr, u32 *off, 4472 u32 *type) 4473 { 4474 const struct bpf_map *map; 4475 int i; 4476 4477 mutex_lock(&prog->aux->used_maps_mutex); 4478 for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { 4479 map = prog->aux->used_maps[i]; 4480 if (map == (void *)addr) { 4481 *type = BPF_PSEUDO_MAP_FD; 4482 goto out; 4483 } 4484 if (!map->ops->map_direct_value_meta) 4485 continue; 4486 if (!map->ops->map_direct_value_meta(map, addr, off)) { 4487 *type = BPF_PSEUDO_MAP_VALUE; 4488 goto out; 4489 } 4490 } 4491 map = NULL; 4492 4493 out: 4494 mutex_unlock(&prog->aux->used_maps_mutex); 4495 return map; 4496 } 4497 4498 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, 4499 const struct cred *f_cred) 4500 { 4501 const struct bpf_map *map; 4502 struct bpf_insn *insns; 4503 u32 off, type; 4504 u64 imm; 4505 u8 code; 4506 int i; 4507 
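/* sanitize a duplicate of the instructions below; the live prog->insnsi is never modified */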
4508 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), 4509 GFP_USER); 4510 if (!insns) 4511 return insns; 4512 4513 for (i = 0; i < prog->len; i++) { 4514 code = insns[i].code; 4515 4516 if (code == (BPF_JMP | BPF_TAIL_CALL)) { 4517 insns[i].code = BPF_JMP | BPF_CALL; 4518 insns[i].imm = BPF_FUNC_tail_call; 4519 /* fall-through */ 4520 } 4521 if (code == (BPF_JMP | BPF_CALL) || 4522 code == (BPF_JMP | BPF_CALL_ARGS)) { 4523 if (code == (BPF_JMP | BPF_CALL_ARGS)) 4524 insns[i].code = BPF_JMP | BPF_CALL; 4525 if (!bpf_dump_raw_ok(f_cred)) 4526 insns[i].imm = 0; 4527 continue; 4528 } 4529 if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) { 4530 insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM; 4531 continue; 4532 } 4533 4534 if ((BPF_CLASS(code) == BPF_LDX || BPF_CLASS(code) == BPF_STX || 4535 BPF_CLASS(code) == BPF_ST) && BPF_MODE(code) == BPF_PROBE_MEM32) { 4536 insns[i].code = BPF_CLASS(code) | BPF_SIZE(code) | BPF_MEM; 4537 continue; 4538 } 4539 4540 if (code != (BPF_LD | BPF_IMM | BPF_DW)) 4541 continue; 4542 4543 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; 4544 map = bpf_map_from_imm(prog, imm, &off, &type); 4545 if (map) { 4546 insns[i].src_reg = type; 4547 insns[i].imm = map->id; 4548 insns[i + 1].imm = off; 4549 continue; 4550 } 4551 } 4552 4553 return insns; 4554 } 4555 4556 static int set_info_rec_size(struct bpf_prog_info *info) 4557 { 4558 /* 4559 * Ensure info.*_rec_size is the same as kernel expected size 4560 * 4561 * or 4562 * 4563 * Only allow zero *_rec_size if both _rec_size and _cnt are 4564 * zero. In this case, the kernel will set the expected 4565 * _rec_size back to the info. 4566 */ 4567 4568 if ((info->nr_func_info || info->func_info_rec_size) && 4569 info->func_info_rec_size != sizeof(struct bpf_func_info)) 4570 return -EINVAL; 4571 4572 if ((info->nr_line_info || info->line_info_rec_size) && 4573 info->line_info_rec_size != sizeof(struct bpf_line_info)) 4574 return -EINVAL; 4575 4576 if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && 4577 info->jited_line_info_rec_size != sizeof(__u64)) 4578 return -EINVAL; 4579 4580 info->func_info_rec_size = sizeof(struct bpf_func_info); 4581 info->line_info_rec_size = sizeof(struct bpf_line_info); 4582 info->jited_line_info_rec_size = sizeof(__u64); 4583 4584 return 0; 4585 } 4586 4587 static int bpf_prog_get_info_by_fd(struct file *file, 4588 struct bpf_prog *prog, 4589 const union bpf_attr *attr, 4590 union bpf_attr __user *uattr) 4591 { 4592 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4593 struct btf *attach_btf = bpf_prog_get_target_btf(prog); 4594 struct bpf_prog_info info; 4595 u32 info_len = attr->info.info_len; 4596 struct bpf_prog_kstats stats; 4597 char __user *uinsns; 4598 u32 ulen; 4599 int err; 4600 4601 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4602 if (err) 4603 return err; 4604 info_len = min_t(u32, sizeof(info), info_len); 4605 4606 memset(&info, 0, sizeof(info)); 4607 if (copy_from_user(&info, uinfo, info_len)) 4608 return -EFAULT; 4609 4610 info.type = prog->type; 4611 info.id = prog->aux->id; 4612 info.load_time = prog->aux->load_time; 4613 info.created_by_uid = from_kuid_munged(current_user_ns(), 4614 prog->aux->user->uid); 4615 info.gpl_compatible = prog->gpl_compatible; 4616 4617 memcpy(info.tag, prog->tag, sizeof(prog->tag)); 4618 memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); 4619 4620 mutex_lock(&prog->aux->used_maps_mutex); 4621 ulen = info.nr_map_ids; 4622 info.nr_map_ids 
= prog->aux->used_map_cnt; 4623 ulen = min_t(u32, info.nr_map_ids, ulen); 4624 if (ulen) { 4625 u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); 4626 u32 i; 4627 4628 for (i = 0; i < ulen; i++) 4629 if (put_user(prog->aux->used_maps[i]->id, 4630 &user_map_ids[i])) { 4631 mutex_unlock(&prog->aux->used_maps_mutex); 4632 return -EFAULT; 4633 } 4634 } 4635 mutex_unlock(&prog->aux->used_maps_mutex); 4636 4637 err = set_info_rec_size(&info); 4638 if (err) 4639 return err; 4640 4641 bpf_prog_get_stats(prog, &stats); 4642 info.run_time_ns = stats.nsecs; 4643 info.run_cnt = stats.cnt; 4644 info.recursion_misses = stats.misses; 4645 4646 info.verified_insns = prog->aux->verified_insns; 4647 4648 if (!bpf_capable()) { 4649 info.jited_prog_len = 0; 4650 info.xlated_prog_len = 0; 4651 info.nr_jited_ksyms = 0; 4652 info.nr_jited_func_lens = 0; 4653 info.nr_func_info = 0; 4654 info.nr_line_info = 0; 4655 info.nr_jited_line_info = 0; 4656 goto done; 4657 } 4658 4659 ulen = info.xlated_prog_len; 4660 info.xlated_prog_len = bpf_prog_insn_size(prog); 4661 if (info.xlated_prog_len && ulen) { 4662 struct bpf_insn *insns_sanitized; 4663 bool fault; 4664 4665 if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { 4666 info.xlated_prog_insns = 0; 4667 goto done; 4668 } 4669 insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); 4670 if (!insns_sanitized) 4671 return -ENOMEM; 4672 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 4673 ulen = min_t(u32, info.xlated_prog_len, ulen); 4674 fault = copy_to_user(uinsns, insns_sanitized, ulen); 4675 kfree(insns_sanitized); 4676 if (fault) 4677 return -EFAULT; 4678 } 4679 4680 if (bpf_prog_is_offloaded(prog->aux)) { 4681 err = bpf_prog_offload_info_fill(&info, prog); 4682 if (err) 4683 return err; 4684 goto done; 4685 } 4686 4687 /* NOTE: the following code is supposed to be skipped for offload. 4688 * bpf_prog_offload_info_fill() is the place to fill similar fields 4689 * for offload. 4690 */ 4691 ulen = info.jited_prog_len; 4692 if (prog->aux->func_cnt) { 4693 u32 i; 4694 4695 info.jited_prog_len = 0; 4696 for (i = 0; i < prog->aux->func_cnt; i++) 4697 info.jited_prog_len += prog->aux->func[i]->jited_len; 4698 } else { 4699 info.jited_prog_len = prog->jited_len; 4700 } 4701 4702 if (info.jited_prog_len && ulen) { 4703 if (bpf_dump_raw_ok(file->f_cred)) { 4704 uinsns = u64_to_user_ptr(info.jited_prog_insns); 4705 ulen = min_t(u32, info.jited_prog_len, ulen); 4706 4707 /* for multi-function programs, copy the JITed 4708 * instructions for all the functions 4709 */ 4710 if (prog->aux->func_cnt) { 4711 u32 len, free, i; 4712 u8 *img; 4713 4714 free = ulen; 4715 for (i = 0; i < prog->aux->func_cnt; i++) { 4716 len = prog->aux->func[i]->jited_len; 4717 len = min_t(u32, len, free); 4718 img = (u8 *) prog->aux->func[i]->bpf_func; 4719 if (copy_to_user(uinsns, img, len)) 4720 return -EFAULT; 4721 uinsns += len; 4722 free -= len; 4723 if (!free) 4724 break; 4725 } 4726 } else { 4727 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 4728 return -EFAULT; 4729 } 4730 } else { 4731 info.jited_prog_insns = 0; 4732 } 4733 } 4734 4735 ulen = info.nr_jited_ksyms; 4736 info.nr_jited_ksyms = prog->aux->func_cnt ? 
: 1; 4737 if (ulen) { 4738 if (bpf_dump_raw_ok(file->f_cred)) { 4739 unsigned long ksym_addr; 4740 u64 __user *user_ksyms; 4741 u32 i; 4742 4743 /* copy the address of the kernel symbol 4744 * corresponding to each function 4745 */ 4746 ulen = min_t(u32, info.nr_jited_ksyms, ulen); 4747 user_ksyms = u64_to_user_ptr(info.jited_ksyms); 4748 if (prog->aux->func_cnt) { 4749 for (i = 0; i < ulen; i++) { 4750 ksym_addr = (unsigned long) 4751 prog->aux->func[i]->bpf_func; 4752 if (put_user((u64) ksym_addr, 4753 &user_ksyms[i])) 4754 return -EFAULT; 4755 } 4756 } else { 4757 ksym_addr = (unsigned long) prog->bpf_func; 4758 if (put_user((u64) ksym_addr, &user_ksyms[0])) 4759 return -EFAULT; 4760 } 4761 } else { 4762 info.jited_ksyms = 0; 4763 } 4764 } 4765 4766 ulen = info.nr_jited_func_lens; 4767 info.nr_jited_func_lens = prog->aux->func_cnt ? : 1; 4768 if (ulen) { 4769 if (bpf_dump_raw_ok(file->f_cred)) { 4770 u32 __user *user_lens; 4771 u32 func_len, i; 4772 4773 /* copy the JITed image lengths for each function */ 4774 ulen = min_t(u32, info.nr_jited_func_lens, ulen); 4775 user_lens = u64_to_user_ptr(info.jited_func_lens); 4776 if (prog->aux->func_cnt) { 4777 for (i = 0; i < ulen; i++) { 4778 func_len = 4779 prog->aux->func[i]->jited_len; 4780 if (put_user(func_len, &user_lens[i])) 4781 return -EFAULT; 4782 } 4783 } else { 4784 func_len = prog->jited_len; 4785 if (put_user(func_len, &user_lens[0])) 4786 return -EFAULT; 4787 } 4788 } else { 4789 info.jited_func_lens = 0; 4790 } 4791 } 4792 4793 if (prog->aux->btf) 4794 info.btf_id = btf_obj_id(prog->aux->btf); 4795 info.attach_btf_id = prog->aux->attach_btf_id; 4796 if (attach_btf) 4797 info.attach_btf_obj_id = btf_obj_id(attach_btf); 4798 4799 ulen = info.nr_func_info; 4800 info.nr_func_info = prog->aux->func_info_cnt; 4801 if (info.nr_func_info && ulen) { 4802 char __user *user_finfo; 4803 4804 user_finfo = u64_to_user_ptr(info.func_info); 4805 ulen = min_t(u32, info.nr_func_info, ulen); 4806 if (copy_to_user(user_finfo, prog->aux->func_info, 4807 info.func_info_rec_size * ulen)) 4808 return -EFAULT; 4809 } 4810 4811 ulen = info.nr_line_info; 4812 info.nr_line_info = prog->aux->nr_linfo; 4813 if (info.nr_line_info && ulen) { 4814 __u8 __user *user_linfo; 4815 4816 user_linfo = u64_to_user_ptr(info.line_info); 4817 ulen = min_t(u32, info.nr_line_info, ulen); 4818 if (copy_to_user(user_linfo, prog->aux->linfo, 4819 info.line_info_rec_size * ulen)) 4820 return -EFAULT; 4821 } 4822 4823 ulen = info.nr_jited_line_info; 4824 if (prog->aux->jited_linfo) 4825 info.nr_jited_line_info = prog->aux->nr_linfo; 4826 else 4827 info.nr_jited_line_info = 0; 4828 if (info.nr_jited_line_info && ulen) { 4829 if (bpf_dump_raw_ok(file->f_cred)) { 4830 unsigned long line_addr; 4831 __u64 __user *user_linfo; 4832 u32 i; 4833 4834 user_linfo = u64_to_user_ptr(info.jited_line_info); 4835 ulen = min_t(u32, info.nr_jited_line_info, ulen); 4836 for (i = 0; i < ulen; i++) { 4837 line_addr = (unsigned long)prog->aux->jited_linfo[i]; 4838 if (put_user((__u64)line_addr, &user_linfo[i])) 4839 return -EFAULT; 4840 } 4841 } else { 4842 info.jited_line_info = 0; 4843 } 4844 } 4845 4846 ulen = info.nr_prog_tags; 4847 info.nr_prog_tags = prog->aux->func_cnt ? 
: 1; 4848 if (ulen) { 4849 __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; 4850 u32 i; 4851 4852 user_prog_tags = u64_to_user_ptr(info.prog_tags); 4853 ulen = min_t(u32, info.nr_prog_tags, ulen); 4854 if (prog->aux->func_cnt) { 4855 for (i = 0; i < ulen; i++) { 4856 if (copy_to_user(user_prog_tags[i], 4857 prog->aux->func[i]->tag, 4858 BPF_TAG_SIZE)) 4859 return -EFAULT; 4860 } 4861 } else { 4862 if (copy_to_user(user_prog_tags[0], 4863 prog->tag, BPF_TAG_SIZE)) 4864 return -EFAULT; 4865 } 4866 } 4867 4868 done: 4869 if (copy_to_user(uinfo, &info, info_len) || 4870 put_user(info_len, &uattr->info.info_len)) 4871 return -EFAULT; 4872 4873 return 0; 4874 } 4875 4876 static int bpf_map_get_info_by_fd(struct file *file, 4877 struct bpf_map *map, 4878 const union bpf_attr *attr, 4879 union bpf_attr __user *uattr) 4880 { 4881 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4882 struct bpf_map_info info; 4883 u32 info_len = attr->info.info_len; 4884 int err; 4885 4886 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4887 if (err) 4888 return err; 4889 info_len = min_t(u32, sizeof(info), info_len); 4890 4891 memset(&info, 0, sizeof(info)); 4892 info.type = map->map_type; 4893 info.id = map->id; 4894 info.key_size = map->key_size; 4895 info.value_size = map->value_size; 4896 info.max_entries = map->max_entries; 4897 info.map_flags = map->map_flags; 4898 info.map_extra = map->map_extra; 4899 memcpy(info.name, map->name, sizeof(map->name)); 4900 4901 if (map->btf) { 4902 info.btf_id = btf_obj_id(map->btf); 4903 info.btf_key_type_id = map->btf_key_type_id; 4904 info.btf_value_type_id = map->btf_value_type_id; 4905 } 4906 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; 4907 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) 4908 bpf_map_struct_ops_info_fill(&info, map); 4909 4910 if (bpf_map_is_offloaded(map)) { 4911 err = bpf_map_offload_info_fill(&info, map); 4912 if (err) 4913 return err; 4914 } 4915 4916 if (copy_to_user(uinfo, &info, info_len) || 4917 put_user(info_len, &uattr->info.info_len)) 4918 return -EFAULT; 4919 4920 return 0; 4921 } 4922 4923 static int bpf_btf_get_info_by_fd(struct file *file, 4924 struct btf *btf, 4925 const union bpf_attr *attr, 4926 union bpf_attr __user *uattr) 4927 { 4928 struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4929 u32 info_len = attr->info.info_len; 4930 int err; 4931 4932 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len); 4933 if (err) 4934 return err; 4935 4936 return btf_get_info_by_fd(btf, attr, uattr); 4937 } 4938 4939 static int bpf_link_get_info_by_fd(struct file *file, 4940 struct bpf_link *link, 4941 const union bpf_attr *attr, 4942 union bpf_attr __user *uattr) 4943 { 4944 struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info); 4945 struct bpf_link_info info; 4946 u32 info_len = attr->info.info_len; 4947 int err; 4948 4949 err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len); 4950 if (err) 4951 return err; 4952 info_len = min_t(u32, sizeof(info), info_len); 4953 4954 memset(&info, 0, sizeof(info)); 4955 if (copy_from_user(&info, uinfo, info_len)) 4956 return -EFAULT; 4957 4958 info.type = link->type; 4959 info.id = link->id; 4960 if (link->prog) 4961 info.prog_id = link->prog->aux->id; 4962 4963 if (link->ops->fill_link_info) { 4964 err = link->ops->fill_link_info(link, &info); 4965 if (err) 4966 return err; 4967 } 4968 4969 if (copy_to_user(uinfo, &info, info_len) || 4970 put_user(info_len, 
&uattr->info.info_len)) 4971 return -EFAULT; 4972 4973 return 0; 4974 } 4975 4976 4977 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info 4978 4979 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, 4980 union bpf_attr __user *uattr) 4981 { 4982 int ufd = attr->info.bpf_fd; 4983 struct fd f; 4984 int err; 4985 4986 if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) 4987 return -EINVAL; 4988 4989 f = fdget(ufd); 4990 if (!f.file) 4991 return -EBADFD; 4992 4993 if (f.file->f_op == &bpf_prog_fops) 4994 err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, 4995 uattr); 4996 else if (f.file->f_op == &bpf_map_fops) 4997 err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, 4998 uattr); 4999 else if (f.file->f_op == &btf_fops) 5000 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); 5001 else if (f.file->f_op == &bpf_link_fops || f.file->f_op == &bpf_link_fops_poll) 5002 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, 5003 attr, uattr); 5004 else 5005 err = -EINVAL; 5006 5007 fdput(f); 5008 return err; 5009 } 5010 5011 #define BPF_BTF_LOAD_LAST_FIELD btf_token_fd 5012 5013 static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 5014 { 5015 struct bpf_token *token = NULL; 5016 5017 if (CHECK_ATTR(BPF_BTF_LOAD)) 5018 return -EINVAL; 5019 5020 if (attr->btf_flags & ~BPF_F_TOKEN_FD) 5021 return -EINVAL; 5022 5023 if (attr->btf_flags & BPF_F_TOKEN_FD) { 5024 token = bpf_token_get_from_fd(attr->btf_token_fd); 5025 if (IS_ERR(token)) 5026 return PTR_ERR(token); 5027 if (!bpf_token_allow_cmd(token, BPF_BTF_LOAD)) { 5028 bpf_token_put(token); 5029 token = NULL; 5030 } 5031 } 5032 5033 if (!bpf_token_capable(token, CAP_BPF)) { 5034 bpf_token_put(token); 5035 return -EPERM; 5036 } 5037 5038 bpf_token_put(token); 5039 5040 return btf_new_fd(attr, uattr, uattr_size); 5041 } 5042 5043 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id 5044 5045 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) 5046 { 5047 if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) 5048 return -EINVAL; 5049 5050 if (!capable(CAP_SYS_ADMIN)) 5051 return -EPERM; 5052 5053 return btf_get_fd_by_id(attr->btf_id); 5054 } 5055 5056 static int bpf_task_fd_query_copy(const union bpf_attr *attr, 5057 union bpf_attr __user *uattr, 5058 u32 prog_id, u32 fd_type, 5059 const char *buf, u64 probe_offset, 5060 u64 probe_addr) 5061 { 5062 char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf); 5063 u32 len = buf ? strlen(buf) : 0, input_len; 5064 int err = 0; 5065 5066 if (put_user(len, &uattr->task_fd_query.buf_len)) 5067 return -EFAULT; 5068 input_len = attr->task_fd_query.buf_len; 5069 if (input_len && ubuf) { 5070 if (!len) { 5071 /* nothing to copy, just make ubuf NULL terminated */ 5072 char zero = '\0'; 5073 5074 if (put_user(zero, ubuf)) 5075 return -EFAULT; 5076 } else if (input_len >= len + 1) { 5077 /* ubuf can hold the string with NULL terminator */ 5078 if (copy_to_user(ubuf, buf, len + 1)) 5079 return -EFAULT; 5080 } else { 5081 /* ubuf cannot hold the string with NULL terminator, 5082 * do a partial copy with NULL terminator. 
5083 */ 5084 char zero = '\0'; 5085 5086 err = -ENOSPC; 5087 if (copy_to_user(ubuf, buf, input_len - 1)) 5088 return -EFAULT; 5089 if (put_user(zero, ubuf + input_len - 1)) 5090 return -EFAULT; 5091 } 5092 } 5093 5094 if (put_user(prog_id, &uattr->task_fd_query.prog_id) || 5095 put_user(fd_type, &uattr->task_fd_query.fd_type) || 5096 put_user(probe_offset, &uattr->task_fd_query.probe_offset) || 5097 put_user(probe_addr, &uattr->task_fd_query.probe_addr)) 5098 return -EFAULT; 5099 5100 return err; 5101 } 5102 5103 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr 5104 5105 static int bpf_task_fd_query(const union bpf_attr *attr, 5106 union bpf_attr __user *uattr) 5107 { 5108 pid_t pid = attr->task_fd_query.pid; 5109 u32 fd = attr->task_fd_query.fd; 5110 const struct perf_event *event; 5111 struct task_struct *task; 5112 struct file *file; 5113 int err; 5114 5115 if (CHECK_ATTR(BPF_TASK_FD_QUERY)) 5116 return -EINVAL; 5117 5118 if (!capable(CAP_SYS_ADMIN)) 5119 return -EPERM; 5120 5121 if (attr->task_fd_query.flags != 0) 5122 return -EINVAL; 5123 5124 rcu_read_lock(); 5125 task = get_pid_task(find_vpid(pid), PIDTYPE_PID); 5126 rcu_read_unlock(); 5127 if (!task) 5128 return -ENOENT; 5129 5130 err = 0; 5131 file = fget_task(task, fd); 5132 put_task_struct(task); 5133 if (!file) 5134 return -EBADF; 5135 5136 if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) { 5137 struct bpf_link *link = file->private_data; 5138 5139 if (link->ops == &bpf_raw_tp_link_lops) { 5140 struct bpf_raw_tp_link *raw_tp = 5141 container_of(link, struct bpf_raw_tp_link, link); 5142 struct bpf_raw_event_map *btp = raw_tp->btp; 5143 5144 err = bpf_task_fd_query_copy(attr, uattr, 5145 raw_tp->link.prog->aux->id, 5146 BPF_FD_TYPE_RAW_TRACEPOINT, 5147 btp->tp->name, 0, 0); 5148 goto put_file; 5149 } 5150 goto out_not_supp; 5151 } 5152 5153 event = perf_get_event(file); 5154 if (!IS_ERR(event)) { 5155 u64 probe_offset, probe_addr; 5156 u32 prog_id, fd_type; 5157 const char *buf; 5158 5159 err = bpf_get_perf_event_info(event, &prog_id, &fd_type, 5160 &buf, &probe_offset, 5161 &probe_addr, NULL); 5162 if (!err) 5163 err = bpf_task_fd_query_copy(attr, uattr, prog_id, 5164 fd_type, buf, 5165 probe_offset, 5166 probe_addr); 5167 goto put_file; 5168 } 5169 5170 out_not_supp: 5171 err = -ENOTSUPP; 5172 put_file: 5173 fput(file); 5174 return err; 5175 } 5176 5177 #define BPF_MAP_BATCH_LAST_FIELD batch.flags 5178 5179 #define BPF_DO_BATCH(fn, ...) 
\ 5180 do { \ 5181 if (!fn) { \ 5182 err = -ENOTSUPP; \ 5183 goto err_put; \ 5184 } \ 5185 err = fn(__VA_ARGS__); \ 5186 } while (0) 5187 5188 static int bpf_map_do_batch(const union bpf_attr *attr, 5189 union bpf_attr __user *uattr, 5190 int cmd) 5191 { 5192 bool has_read = cmd == BPF_MAP_LOOKUP_BATCH || 5193 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH; 5194 bool has_write = cmd != BPF_MAP_LOOKUP_BATCH; 5195 struct bpf_map *map; 5196 int err, ufd; 5197 struct fd f; 5198 5199 if (CHECK_ATTR(BPF_MAP_BATCH)) 5200 return -EINVAL; 5201 5202 ufd = attr->batch.map_fd; 5203 f = fdget(ufd); 5204 map = __bpf_map_get(f); 5205 if (IS_ERR(map)) 5206 return PTR_ERR(map); 5207 if (has_write) 5208 bpf_map_write_active_inc(map); 5209 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { 5210 err = -EPERM; 5211 goto err_put; 5212 } 5213 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { 5214 err = -EPERM; 5215 goto err_put; 5216 } 5217 5218 if (cmd == BPF_MAP_LOOKUP_BATCH) 5219 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); 5220 else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH) 5221 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); 5222 else if (cmd == BPF_MAP_UPDATE_BATCH) 5223 BPF_DO_BATCH(map->ops->map_update_batch, map, f.file, attr, uattr); 5224 else 5225 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); 5226 err_put: 5227 if (has_write) { 5228 maybe_wait_bpf_programs(map); 5229 bpf_map_write_active_dec(map); 5230 } 5231 fdput(f); 5232 return err; 5233 } 5234 5235 #define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid 5236 static int link_create(union bpf_attr *attr, bpfptr_t uattr) 5237 { 5238 struct bpf_prog *prog; 5239 int ret; 5240 5241 if (CHECK_ATTR(BPF_LINK_CREATE)) 5242 return -EINVAL; 5243 5244 if (attr->link_create.attach_type == BPF_STRUCT_OPS) 5245 return bpf_struct_ops_link_create(attr); 5246 5247 prog = bpf_prog_get(attr->link_create.prog_fd); 5248 if (IS_ERR(prog)) 5249 return PTR_ERR(prog); 5250 5251 ret = bpf_prog_attach_check_attach_type(prog, 5252 attr->link_create.attach_type); 5253 if (ret) 5254 goto out; 5255 5256 switch (prog->type) { 5257 case BPF_PROG_TYPE_CGROUP_SKB: 5258 case BPF_PROG_TYPE_CGROUP_SOCK: 5259 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 5260 case BPF_PROG_TYPE_SOCK_OPS: 5261 case BPF_PROG_TYPE_CGROUP_DEVICE: 5262 case BPF_PROG_TYPE_CGROUP_SYSCTL: 5263 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 5264 ret = cgroup_bpf_link_attach(attr, prog); 5265 break; 5266 case BPF_PROG_TYPE_EXT: 5267 ret = bpf_tracing_prog_attach(prog, 5268 attr->link_create.target_fd, 5269 attr->link_create.target_btf_id, 5270 attr->link_create.tracing.cookie); 5271 break; 5272 case BPF_PROG_TYPE_LSM: 5273 case BPF_PROG_TYPE_TRACING: 5274 if (attr->link_create.attach_type != prog->expected_attach_type) { 5275 ret = -EINVAL; 5276 goto out; 5277 } 5278 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5279 ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); 5280 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5281 ret = bpf_iter_link_attach(attr, uattr, prog); 5282 else if (prog->expected_attach_type == BPF_LSM_CGROUP) 5283 ret = cgroup_bpf_link_attach(attr, prog); 5284 else 5285 ret = bpf_tracing_prog_attach(prog, 5286 attr->link_create.target_fd, 5287 attr->link_create.target_btf_id, 5288 attr->link_create.tracing.cookie); 5289 break; 5290 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5291 case BPF_PROG_TYPE_SK_LOOKUP: 5292 ret = netns_bpf_link_create(attr, prog); 5293 break; 5294 case BPF_PROG_TYPE_SK_MSG: 5295 
case BPF_PROG_TYPE_SK_SKB: 5296 ret = sock_map_link_create(attr, prog); 5297 break; 5298 #ifdef CONFIG_NET 5299 case BPF_PROG_TYPE_XDP: 5300 ret = bpf_xdp_link_attach(attr, prog); 5301 break; 5302 case BPF_PROG_TYPE_SCHED_CLS: 5303 if (attr->link_create.attach_type == BPF_TCX_INGRESS || 5304 attr->link_create.attach_type == BPF_TCX_EGRESS) 5305 ret = tcx_link_attach(attr, prog); 5306 else 5307 ret = netkit_link_attach(attr, prog); 5308 break; 5309 case BPF_PROG_TYPE_NETFILTER: 5310 ret = bpf_nf_link_attach(attr, prog); 5311 break; 5312 #endif 5313 case BPF_PROG_TYPE_PERF_EVENT: 5314 case BPF_PROG_TYPE_TRACEPOINT: 5315 ret = bpf_perf_link_attach(attr, prog); 5316 break; 5317 case BPF_PROG_TYPE_KPROBE: 5318 if (attr->link_create.attach_type == BPF_PERF_EVENT) 5319 ret = bpf_perf_link_attach(attr, prog); 5320 else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI || 5321 attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION) 5322 ret = bpf_kprobe_multi_link_attach(attr, prog); 5323 else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) 5324 ret = bpf_uprobe_multi_link_attach(attr, prog); 5325 break; 5326 default: 5327 ret = -EINVAL; 5328 } 5329 5330 out: 5331 if (ret < 0) 5332 bpf_prog_put(prog); 5333 return ret; 5334 } 5335 5336 static int link_update_map(struct bpf_link *link, union bpf_attr *attr) 5337 { 5338 struct bpf_map *new_map, *old_map = NULL; 5339 int ret; 5340 5341 new_map = bpf_map_get(attr->link_update.new_map_fd); 5342 if (IS_ERR(new_map)) 5343 return PTR_ERR(new_map); 5344 5345 if (attr->link_update.flags & BPF_F_REPLACE) { 5346 old_map = bpf_map_get(attr->link_update.old_map_fd); 5347 if (IS_ERR(old_map)) { 5348 ret = PTR_ERR(old_map); 5349 goto out_put; 5350 } 5351 } else if (attr->link_update.old_map_fd) { 5352 ret = -EINVAL; 5353 goto out_put; 5354 } 5355 5356 ret = link->ops->update_map(link, new_map, old_map); 5357 5358 if (old_map) 5359 bpf_map_put(old_map); 5360 out_put: 5361 bpf_map_put(new_map); 5362 return ret; 5363 } 5364 5365 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd 5366 5367 static int link_update(union bpf_attr *attr) 5368 { 5369 struct bpf_prog *old_prog = NULL, *new_prog; 5370 struct bpf_link *link; 5371 u32 flags; 5372 int ret; 5373 5374 if (CHECK_ATTR(BPF_LINK_UPDATE)) 5375 return -EINVAL; 5376 5377 flags = attr->link_update.flags; 5378 if (flags & ~BPF_F_REPLACE) 5379 return -EINVAL; 5380 5381 link = bpf_link_get_from_fd(attr->link_update.link_fd); 5382 if (IS_ERR(link)) 5383 return PTR_ERR(link); 5384 5385 if (link->ops->update_map) { 5386 ret = link_update_map(link, attr); 5387 goto out_put_link; 5388 } 5389 5390 new_prog = bpf_prog_get(attr->link_update.new_prog_fd); 5391 if (IS_ERR(new_prog)) { 5392 ret = PTR_ERR(new_prog); 5393 goto out_put_link; 5394 } 5395 5396 if (flags & BPF_F_REPLACE) { 5397 old_prog = bpf_prog_get(attr->link_update.old_prog_fd); 5398 if (IS_ERR(old_prog)) { 5399 ret = PTR_ERR(old_prog); 5400 old_prog = NULL; 5401 goto out_put_progs; 5402 } 5403 } else if (attr->link_update.old_prog_fd) { 5404 ret = -EINVAL; 5405 goto out_put_progs; 5406 } 5407 5408 if (link->ops->update_prog) 5409 ret = link->ops->update_prog(link, new_prog, old_prog); 5410 else 5411 ret = -EINVAL; 5412 5413 out_put_progs: 5414 if (old_prog) 5415 bpf_prog_put(old_prog); 5416 if (ret) 5417 bpf_prog_put(new_prog); 5418 out_put_link: 5419 bpf_link_put_direct(link); 5420 return ret; 5421 } 5422 5423 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd 5424 5425 static int link_detach(union bpf_attr *attr) 5426 { 5427 
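/* force-detach the link from its hook; the link fd stays valid until its last reference is dropped */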
struct bpf_link *link; 5428 int ret; 5429 5430 if (CHECK_ATTR(BPF_LINK_DETACH)) 5431 return -EINVAL; 5432 5433 link = bpf_link_get_from_fd(attr->link_detach.link_fd); 5434 if (IS_ERR(link)) 5435 return PTR_ERR(link); 5436 5437 if (link->ops->detach) 5438 ret = link->ops->detach(link); 5439 else 5440 ret = -EOPNOTSUPP; 5441 5442 bpf_link_put_direct(link); 5443 return ret; 5444 } 5445 5446 struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 5447 { 5448 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5449 } 5450 EXPORT_SYMBOL(bpf_link_inc_not_zero); 5451 5452 struct bpf_link *bpf_link_by_id(u32 id) 5453 { 5454 struct bpf_link *link; 5455 5456 if (!id) 5457 return ERR_PTR(-ENOENT); 5458 5459 spin_lock_bh(&link_idr_lock); 5460 /* before link is "settled", ID is 0, pretend it doesn't exist yet */ 5461 link = idr_find(&link_idr, id); 5462 if (link) { 5463 if (link->id) 5464 link = bpf_link_inc_not_zero(link); 5465 else 5466 link = ERR_PTR(-EAGAIN); 5467 } else { 5468 link = ERR_PTR(-ENOENT); 5469 } 5470 spin_unlock_bh(&link_idr_lock); 5471 return link; 5472 } 5473 5474 struct bpf_link *bpf_link_get_curr_or_next(u32 *id) 5475 { 5476 struct bpf_link *link; 5477 5478 spin_lock_bh(&link_idr_lock); 5479 again: 5480 link = idr_get_next(&link_idr, id); 5481 if (link) { 5482 link = bpf_link_inc_not_zero(link); 5483 if (IS_ERR(link)) { 5484 (*id)++; 5485 goto again; 5486 } 5487 } 5488 spin_unlock_bh(&link_idr_lock); 5489 5490 return link; 5491 } 5492 5493 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id 5494 5495 static int bpf_link_get_fd_by_id(const union bpf_attr *attr) 5496 { 5497 struct bpf_link *link; 5498 u32 id = attr->link_id; 5499 int fd; 5500 5501 if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID)) 5502 return -EINVAL; 5503 5504 if (!capable(CAP_SYS_ADMIN)) 5505 return -EPERM; 5506 5507 link = bpf_link_by_id(id); 5508 if (IS_ERR(link)) 5509 return PTR_ERR(link); 5510 5511 fd = bpf_link_new_fd(link); 5512 if (fd < 0) 5513 bpf_link_put_direct(link); 5514 5515 return fd; 5516 } 5517 5518 DEFINE_MUTEX(bpf_stats_enabled_mutex); 5519 5520 static int bpf_stats_release(struct inode *inode, struct file *file) 5521 { 5522 mutex_lock(&bpf_stats_enabled_mutex); 5523 static_key_slow_dec(&bpf_stats_enabled_key.key); 5524 mutex_unlock(&bpf_stats_enabled_mutex); 5525 return 0; 5526 } 5527 5528 static const struct file_operations bpf_stats_fops = { 5529 .release = bpf_stats_release, 5530 }; 5531 5532 static int bpf_enable_runtime_stats(void) 5533 { 5534 int fd; 5535 5536 mutex_lock(&bpf_stats_enabled_mutex); 5537 5538 /* Set a very high limit to avoid overflow */ 5539 if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) { 5540 mutex_unlock(&bpf_stats_enabled_mutex); 5541 return -EBUSY; 5542 } 5543 5544 fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC); 5545 if (fd >= 0) 5546 static_key_slow_inc(&bpf_stats_enabled_key.key); 5547 5548 mutex_unlock(&bpf_stats_enabled_mutex); 5549 return fd; 5550 } 5551 5552 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type 5553 5554 static int bpf_enable_stats(union bpf_attr *attr) 5555 { 5556 5557 if (CHECK_ATTR(BPF_ENABLE_STATS)) 5558 return -EINVAL; 5559 5560 if (!capable(CAP_SYS_ADMIN)) 5561 return -EPERM; 5562 5563 switch (attr->enable_stats.type) { 5564 case BPF_STATS_RUN_TIME: 5565 return bpf_enable_runtime_stats(); 5566 default: 5567 break; 5568 } 5569 return -EINVAL; 5570 } 5571 5572 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags 5573 5574 static int bpf_iter_create(union bpf_attr *attr) 5575 { 5576 
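/* create a read()-able iterator fd from an already attached bpf_iter link */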
struct bpf_link *link; 5577 int err; 5578 5579 if (CHECK_ATTR(BPF_ITER_CREATE)) 5580 return -EINVAL; 5581 5582 if (attr->iter_create.flags) 5583 return -EINVAL; 5584 5585 link = bpf_link_get_from_fd(attr->iter_create.link_fd); 5586 if (IS_ERR(link)) 5587 return PTR_ERR(link); 5588 5589 err = bpf_iter_new_fd(link); 5590 bpf_link_put_direct(link); 5591 5592 return err; 5593 } 5594 5595 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags 5596 5597 static int bpf_prog_bind_map(union bpf_attr *attr) 5598 { 5599 struct bpf_prog *prog; 5600 struct bpf_map *map; 5601 struct bpf_map **used_maps_old, **used_maps_new; 5602 int i, ret = 0; 5603 5604 if (CHECK_ATTR(BPF_PROG_BIND_MAP)) 5605 return -EINVAL; 5606 5607 if (attr->prog_bind_map.flags) 5608 return -EINVAL; 5609 5610 prog = bpf_prog_get(attr->prog_bind_map.prog_fd); 5611 if (IS_ERR(prog)) 5612 return PTR_ERR(prog); 5613 5614 map = bpf_map_get(attr->prog_bind_map.map_fd); 5615 if (IS_ERR(map)) { 5616 ret = PTR_ERR(map); 5617 goto out_prog_put; 5618 } 5619 5620 mutex_lock(&prog->aux->used_maps_mutex); 5621 5622 used_maps_old = prog->aux->used_maps; 5623 5624 for (i = 0; i < prog->aux->used_map_cnt; i++) 5625 if (used_maps_old[i] == map) { 5626 bpf_map_put(map); 5627 goto out_unlock; 5628 } 5629 5630 used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1, 5631 sizeof(used_maps_new[0]), 5632 GFP_KERNEL); 5633 if (!used_maps_new) { 5634 ret = -ENOMEM; 5635 goto out_unlock; 5636 } 5637 5638 /* The bpf program will not access the bpf map, but for the sake of 5639 * simplicity, increase sleepable_refcnt for sleepable program as well. 5640 */ 5641 if (prog->sleepable) 5642 atomic64_inc(&map->sleepable_refcnt); 5643 memcpy(used_maps_new, used_maps_old, 5644 sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); 5645 used_maps_new[prog->aux->used_map_cnt] = map; 5646 5647 prog->aux->used_map_cnt++; 5648 prog->aux->used_maps = used_maps_new; 5649 5650 kfree(used_maps_old); 5651 5652 out_unlock: 5653 mutex_unlock(&prog->aux->used_maps_mutex); 5654 5655 if (ret) 5656 bpf_map_put(map); 5657 out_prog_put: 5658 bpf_prog_put(prog); 5659 return ret; 5660 } 5661 5662 #define BPF_TOKEN_CREATE_LAST_FIELD token_create.bpffs_fd 5663 5664 static int token_create(union bpf_attr *attr) 5665 { 5666 if (CHECK_ATTR(BPF_TOKEN_CREATE)) 5667 return -EINVAL; 5668 5669 /* no flags are supported yet */ 5670 if (attr->token_create.flags) 5671 return -EINVAL; 5672 5673 return bpf_token_create(attr); 5674 } 5675 5676 static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size) 5677 { 5678 union bpf_attr attr; 5679 int err; 5680 5681 err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); 5682 if (err) 5683 return err; 5684 size = min_t(u32, size, sizeof(attr)); 5685 5686 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 5687 memset(&attr, 0, sizeof(attr)); 5688 if (copy_from_bpfptr(&attr, uattr, size) != 0) 5689 return -EFAULT; 5690 5691 err = security_bpf(cmd, &attr, size); 5692 if (err < 0) 5693 return err; 5694 5695 switch (cmd) { 5696 case BPF_MAP_CREATE: 5697 err = map_create(&attr); 5698 break; 5699 case BPF_MAP_LOOKUP_ELEM: 5700 err = map_lookup_elem(&attr); 5701 break; 5702 case BPF_MAP_UPDATE_ELEM: 5703 err = map_update_elem(&attr, uattr); 5704 break; 5705 case BPF_MAP_DELETE_ELEM: 5706 err = map_delete_elem(&attr, uattr); 5707 break; 5708 case BPF_MAP_GET_NEXT_KEY: 5709 err = map_get_next_key(&attr); 5710 break; 5711 case BPF_MAP_FREEZE: 5712 err = map_freeze(&attr); 5713 break; 5714 case BPF_PROG_LOAD: 5715 err = bpf_prog_load(&attr, 
uattr, size); 5716 break; 5717 case BPF_OBJ_PIN: 5718 err = bpf_obj_pin(&attr); 5719 break; 5720 case BPF_OBJ_GET: 5721 err = bpf_obj_get(&attr); 5722 break; 5723 case BPF_PROG_ATTACH: 5724 err = bpf_prog_attach(&attr); 5725 break; 5726 case BPF_PROG_DETACH: 5727 err = bpf_prog_detach(&attr); 5728 break; 5729 case BPF_PROG_QUERY: 5730 err = bpf_prog_query(&attr, uattr.user); 5731 break; 5732 case BPF_PROG_TEST_RUN: 5733 err = bpf_prog_test_run(&attr, uattr.user); 5734 break; 5735 case BPF_PROG_GET_NEXT_ID: 5736 err = bpf_obj_get_next_id(&attr, uattr.user, 5737 &prog_idr, &prog_idr_lock); 5738 break; 5739 case BPF_MAP_GET_NEXT_ID: 5740 err = bpf_obj_get_next_id(&attr, uattr.user, 5741 &map_idr, &map_idr_lock); 5742 break; 5743 case BPF_BTF_GET_NEXT_ID: 5744 err = bpf_obj_get_next_id(&attr, uattr.user, 5745 &btf_idr, &btf_idr_lock); 5746 break; 5747 case BPF_PROG_GET_FD_BY_ID: 5748 err = bpf_prog_get_fd_by_id(&attr); 5749 break; 5750 case BPF_MAP_GET_FD_BY_ID: 5751 err = bpf_map_get_fd_by_id(&attr); 5752 break; 5753 case BPF_OBJ_GET_INFO_BY_FD: 5754 err = bpf_obj_get_info_by_fd(&attr, uattr.user); 5755 break; 5756 case BPF_RAW_TRACEPOINT_OPEN: 5757 err = bpf_raw_tracepoint_open(&attr); 5758 break; 5759 case BPF_BTF_LOAD: 5760 err = bpf_btf_load(&attr, uattr, size); 5761 break; 5762 case BPF_BTF_GET_FD_BY_ID: 5763 err = bpf_btf_get_fd_by_id(&attr); 5764 break; 5765 case BPF_TASK_FD_QUERY: 5766 err = bpf_task_fd_query(&attr, uattr.user); 5767 break; 5768 case BPF_MAP_LOOKUP_AND_DELETE_ELEM: 5769 err = map_lookup_and_delete_elem(&attr); 5770 break; 5771 case BPF_MAP_LOOKUP_BATCH: 5772 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH); 5773 break; 5774 case BPF_MAP_LOOKUP_AND_DELETE_BATCH: 5775 err = bpf_map_do_batch(&attr, uattr.user, 5776 BPF_MAP_LOOKUP_AND_DELETE_BATCH); 5777 break; 5778 case BPF_MAP_UPDATE_BATCH: 5779 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH); 5780 break; 5781 case BPF_MAP_DELETE_BATCH: 5782 err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH); 5783 break; 5784 case BPF_LINK_CREATE: 5785 err = link_create(&attr, uattr); 5786 break; 5787 case BPF_LINK_UPDATE: 5788 err = link_update(&attr); 5789 break; 5790 case BPF_LINK_GET_FD_BY_ID: 5791 err = bpf_link_get_fd_by_id(&attr); 5792 break; 5793 case BPF_LINK_GET_NEXT_ID: 5794 err = bpf_obj_get_next_id(&attr, uattr.user, 5795 &link_idr, &link_idr_lock); 5796 break; 5797 case BPF_ENABLE_STATS: 5798 err = bpf_enable_stats(&attr); 5799 break; 5800 case BPF_ITER_CREATE: 5801 err = bpf_iter_create(&attr); 5802 break; 5803 case BPF_LINK_DETACH: 5804 err = link_detach(&attr); 5805 break; 5806 case BPF_PROG_BIND_MAP: 5807 err = bpf_prog_bind_map(&attr); 5808 break; 5809 case BPF_TOKEN_CREATE: 5810 err = token_create(&attr); 5811 break; 5812 default: 5813 err = -EINVAL; 5814 break; 5815 } 5816 5817 return err; 5818 } 5819 5820 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 5821 { 5822 return __sys_bpf(cmd, USER_BPFPTR(uattr), size); 5823 } 5824 5825 static bool syscall_prog_is_valid_access(int off, int size, 5826 enum bpf_access_type type, 5827 const struct bpf_prog *prog, 5828 struct bpf_insn_access_aux *info) 5829 { 5830 if (off < 0 || off >= U16_MAX) 5831 return false; 5832 if (off % size != 0) 5833 return false; 5834 return true; 5835 } 5836 5837 BPF_CALL_3(bpf_sys_bpf, int, cmd, union bpf_attr *, attr, u32, attr_size) 5838 { 5839 switch (cmd) { 5840 case BPF_MAP_CREATE: 5841 case BPF_MAP_DELETE_ELEM: 5842 case BPF_MAP_UPDATE_ELEM: 5843 case 
BPF_MAP_FREEZE: 5844 case BPF_MAP_GET_FD_BY_ID: 5845 case BPF_PROG_LOAD: 5846 case BPF_BTF_LOAD: 5847 case BPF_LINK_CREATE: 5848 case BPF_RAW_TRACEPOINT_OPEN: 5849 break; 5850 default: 5851 return -EINVAL; 5852 } 5853 return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size); 5854 } 5855 5856 5857 /* To shut up -Wmissing-prototypes. 5858 * This function is used by the kernel light skeleton 5859 * to load bpf programs when modules are loaded or during kernel boot. 5860 * See tools/lib/bpf/skel_internal.h 5861 */ 5862 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); 5863 5864 int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size) 5865 { 5866 struct bpf_prog * __maybe_unused prog; 5867 struct bpf_tramp_run_ctx __maybe_unused run_ctx; 5868 5869 switch (cmd) { 5870 #ifdef CONFIG_BPF_JIT /* __bpf_prog_enter_sleepable used by trampoline and JIT */ 5871 case BPF_PROG_TEST_RUN: 5872 if (attr->test.data_in || attr->test.data_out || 5873 attr->test.ctx_out || attr->test.duration || 5874 attr->test.repeat || attr->test.flags) 5875 return -EINVAL; 5876 5877 prog = bpf_prog_get_type(attr->test.prog_fd, BPF_PROG_TYPE_SYSCALL); 5878 if (IS_ERR(prog)) 5879 return PTR_ERR(prog); 5880 5881 if (attr->test.ctx_size_in < prog->aux->max_ctx_offset || 5882 attr->test.ctx_size_in > U16_MAX) { 5883 bpf_prog_put(prog); 5884 return -EINVAL; 5885 } 5886 5887 run_ctx.bpf_cookie = 0; 5888 if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) { 5889 /* recursion detected */ 5890 __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx); 5891 bpf_prog_put(prog); 5892 return -EBUSY; 5893 } 5894 attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in); 5895 __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */, 5896 &run_ctx); 5897 bpf_prog_put(prog); 5898 return 0; 5899 #endif 5900 default: 5901 return ____bpf_sys_bpf(cmd, attr, size); 5902 } 5903 } 5904 EXPORT_SYMBOL(kern_sys_bpf); 5905 5906 static const struct bpf_func_proto bpf_sys_bpf_proto = { 5907 .func = bpf_sys_bpf, 5908 .gpl_only = false, 5909 .ret_type = RET_INTEGER, 5910 .arg1_type = ARG_ANYTHING, 5911 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 5912 .arg3_type = ARG_CONST_SIZE, 5913 }; 5914 5915 const struct bpf_func_proto * __weak 5916 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5917 { 5918 return bpf_base_func_proto(func_id, prog); 5919 } 5920 5921 BPF_CALL_1(bpf_sys_close, u32, fd) 5922 { 5923 /* When bpf program calls this helper there should not be 5924 * an fdget() without matching completed fdput(). 5925 * This helper is allowed in the following callchain only: 5926 * sys_bpf->prog_test_run->bpf_prog->bpf_sys_close 5927 */ 5928 return close_fd(fd); 5929 } 5930 5931 static const struct bpf_func_proto bpf_sys_close_proto = { 5932 .func = bpf_sys_close, 5933 .gpl_only = false, 5934 .ret_type = RET_INTEGER, 5935 .arg1_type = ARG_ANYTHING, 5936 }; 5937 5938 BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res) 5939 { 5940 *res = 0; 5941 if (flags) 5942 return -EINVAL; 5943 5944 if (name_sz <= 1 || name[name_sz - 1]) 5945 return -EINVAL; 5946 5947 if (!bpf_dump_raw_ok(current_cred())) 5948 return -EPERM; 5949 5950 *res = kallsyms_lookup_name(name); 5951 return *res ? 
0 : -ENOENT; 5952 } 5953 5954 static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { 5955 .func = bpf_kallsyms_lookup_name, 5956 .gpl_only = false, 5957 .ret_type = RET_INTEGER, 5958 .arg1_type = ARG_PTR_TO_MEM, 5959 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 5960 .arg3_type = ARG_ANYTHING, 5961 .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED, 5962 .arg4_size = sizeof(u64), 5963 }; 5964 5965 static const struct bpf_func_proto * 5966 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 5967 { 5968 switch (func_id) { 5969 case BPF_FUNC_sys_bpf: 5970 return !bpf_token_capable(prog->aux->token, CAP_PERFMON) 5971 ? NULL : &bpf_sys_bpf_proto; 5972 case BPF_FUNC_btf_find_by_name_kind: 5973 return &bpf_btf_find_by_name_kind_proto; 5974 case BPF_FUNC_sys_close: 5975 return &bpf_sys_close_proto; 5976 case BPF_FUNC_kallsyms_lookup_name: 5977 return &bpf_kallsyms_lookup_name_proto; 5978 default: 5979 return tracing_prog_func_proto(func_id, prog); 5980 } 5981 } 5982 5983 const struct bpf_verifier_ops bpf_syscall_verifier_ops = { 5984 .get_func_proto = syscall_prog_func_proto, 5985 .is_valid_access = syscall_prog_is_valid_access, 5986 }; 5987 5988 const struct bpf_prog_ops bpf_syscall_prog_ops = { 5989 .test_run = bpf_prog_test_run_syscall, 5990 }; 5991 5992 #ifdef CONFIG_SYSCTL 5993 static int bpf_stats_handler(const struct ctl_table *table, int write, 5994 void *buffer, size_t *lenp, loff_t *ppos) 5995 { 5996 struct static_key *key = (struct static_key *)table->data; 5997 static int saved_val; 5998 int val, ret; 5999 struct ctl_table tmp = { 6000 .data = &val, 6001 .maxlen = sizeof(val), 6002 .mode = table->mode, 6003 .extra1 = SYSCTL_ZERO, 6004 .extra2 = SYSCTL_ONE, 6005 }; 6006 6007 if (write && !capable(CAP_SYS_ADMIN)) 6008 return -EPERM; 6009 6010 mutex_lock(&bpf_stats_enabled_mutex); 6011 val = saved_val; 6012 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 6013 if (write && !ret && val != saved_val) { 6014 if (val) 6015 static_key_slow_inc(key); 6016 else 6017 static_key_slow_dec(key); 6018 saved_val = val; 6019 } 6020 mutex_unlock(&bpf_stats_enabled_mutex); 6021 return ret; 6022 } 6023 6024 void __weak unpriv_ebpf_notify(int new_state) 6025 { 6026 } 6027 6028 static int bpf_unpriv_handler(const struct ctl_table *table, int write, 6029 void *buffer, size_t *lenp, loff_t *ppos) 6030 { 6031 int ret, unpriv_enable = *(int *)table->data; 6032 bool locked_state = unpriv_enable == 1; 6033 struct ctl_table tmp = *table; 6034 6035 if (write && !capable(CAP_SYS_ADMIN)) 6036 return -EPERM; 6037 6038 tmp.data = &unpriv_enable; 6039 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 6040 if (write && !ret) { 6041 if (locked_state && unpriv_enable != 1) 6042 return -EPERM; 6043 *(int *)table->data = unpriv_enable; 6044 } 6045 6046 if (write) 6047 unpriv_ebpf_notify(unpriv_enable); 6048 6049 return ret; 6050 } 6051 6052 static struct ctl_table bpf_syscall_table[] = { 6053 { 6054 .procname = "unprivileged_bpf_disabled", 6055 .data = &sysctl_unprivileged_bpf_disabled, 6056 .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), 6057 .mode = 0644, 6058 .proc_handler = bpf_unpriv_handler, 6059 .extra1 = SYSCTL_ZERO, 6060 .extra2 = SYSCTL_TWO, 6061 }, 6062 { 6063 .procname = "bpf_stats_enabled", 6064 .data = &bpf_stats_enabled_key.key, 6065 .mode = 0644, 6066 .proc_handler = bpf_stats_handler, 6067 }, 6068 }; 6069 6070 static int __init bpf_syscall_sysctl_init(void) 6071 { 6072 register_sysctl_init("kernel", bpf_syscall_table); 6073 return 0; 
6074 } 6075 late_initcall(bpf_syscall_sysctl_init); 6076 #endif /* CONFIG_SYSCTL */ 6077
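/*
 * Illustrative sketch only (not part of this source file): a minimal
 * user-space walk over the ID-based commands implemented above --
 * BPF_PROG_GET_NEXT_ID, BPF_PROG_GET_FD_BY_ID and BPF_OBJ_GET_INFO_BY_FD.
 * Both ID lookups require CAP_SYS_ADMIN, as checked in bpf_obj_get_next_id()
 * and bpf_prog_get_fd_by_id(); command and field names come from
 * <linux/bpf.h>, and error handling is trimmed for brevity.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
 *	}
 *
 *	int main(void)
 *	{
 *		__u32 id = 0;
 *
 *		for (;;) {
 *			struct bpf_prog_info info;
 *			union bpf_attr attr;
 *			int fd;
 *
 *			memset(&attr, 0, sizeof(attr));
 *			attr.start_id = id;
 *			if (sys_bpf(BPF_PROG_GET_NEXT_ID, &attr))
 *				break;			// -ENOENT: no more programs
 *			id = attr.next_id;
 *
 *			memset(&attr, 0, sizeof(attr));
 *			attr.prog_id = id;
 *			fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr);
 *			if (fd < 0)
 *				continue;		// program vanished meanwhile
 *
 *			memset(&info, 0, sizeof(info));
 *			memset(&attr, 0, sizeof(attr));
 *			attr.info.bpf_fd = fd;
 *			attr.info.info_len = sizeof(info);
 *			attr.info.info = (__u64)(unsigned long)&info;
 *			if (!sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr))
 *				printf("id %u type %u name %s\n",
 *				       info.id, info.type, info.name);
 *			close(fd);
 *		}
 *		return 0;
 *	}
 */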