// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

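/* Illustrative note (added comment, not part of the original source): with
 * Spectre v1 mitigation enabled (!bypass_spec_v1), max_entries is rounded up
 * to a power of two so that speculative loads cannot reach past the
 * allocation.  For example, assuming a map created with max_entries = 5:
 *
 *	mask64      = fls_long(5 - 1) = 3  ->  (1ULL << 3) - 1 = 7
 *	index_mask  = 7
 *	max_entries (allocated) = index_mask + 1 = 8
 *
 * Lookups then use "index & index_mask", which stays within the eight
 * allocated slots even under misspeculation.
 */
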
static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

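/* Illustrative sketch (added comment, not part of the original source): with
 * Spectre v1 mitigation enabled and a power-of-two elem_size, the inlined
 * sequence emitted above roughly corresponds to:
 *
 *	r1 += offsetof(struct bpf_array, value)	// r1 = &array->value[0]
 *	r0 = *(u32 *)(r2 + 0)			// r0 = *key
 *	if r0 >= max_entries goto miss
 *	r0 &= index_mask
 *	r0 <<= ilog2(elem_size)
 *	r0 += r1				// r0 = &element
 *	goto done
 * miss:
 *	r0 = 0					// NULL
 * done:
 */
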
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;

	if (!bpf_jit_supports_percpu_insn())
		return -EOPNOTSUPP;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));

	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
	}

	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
	return insn - insn_buf;
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

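/* Illustrative note (added comment, not part of the original source): the
 * get_next_key semantics above give user space the usual iteration pattern,
 * e.g. with libbpf (map_fd is assumed to be an already created array map):
 *
 *	__u32 key, next;
 *	int err = bpf_map_get_next_key(map_fd, NULL, &next);	// NULL key -> first index (0)
 *	while (!err) {
 *		key = next;
 *		// ... bpf_map_lookup_elem(map_fd, &key, value) ...
 *		err = bpf_map_get_next_key(map_fd, &key, &next);
 *	}
 *	// the final call fails with ENOENT once the last index was visited
 */
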
/* Called from syscall or from eBPF program */
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only
	 * access value_size of it. During lookup the same extra bytes
	 * will be returned or zeros which were zero-filled by
	 * percpu_alloc, so no kernel data leaks are possible.
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers_wq(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer and workqueue
	 * on uref dropping to zero.
	 */
	if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) {
		for (i = 0; i < array->map.max_entries; i++) {
			if (btf_record_has_field(map->record, BPF_TIMER))
				bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
			if (btf_record_has_field(map->record, BPF_WORKQUEUE))
				bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

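/* Illustrative note (added comment, not part of the original source): for
 * BPF_F_MMAPABLE maps, array_map_alloc() places struct bpf_array just below
 * a page boundary inside the vmalloc'ed region so that array->value starts
 * exactly on that boundary.  array_map_mmap() below therefore only exposes
 * the element pages (starting at the pgoff derived from
 * PAGE_ALIGN(sizeof(*array))), and array_map_free() has to go through
 * array_map_vmalloc_addr() to get back to the real start of the allocation.
 */
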
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for the iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				    void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}

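/* Illustrative sketch (added comment, not part of the original source): a BPF
 * program typically reaches bpf_for_each_array_elem() through the
 * bpf_for_each_map_elem() helper, roughly like the following (assuming an
 * array map with 8-byte values; "my_array", "count_nonzero" and "n" are
 * made-up names):
 *
 *	static long count_nonzero(struct bpf_map *map, __u32 *key,
 *				  __u64 *val, void *ctx)
 *	{
 *		if (*val)
 *			(*(long *)ctx)++;
 *		return 0;	// 0 = continue, 1 = stop iterating
 *	}
 *
 *	long n = 0;
 *	bpf_for_each_map_elem(&my_array, count_nonzero, &n, 0);
 *
 * The helper's return value is the number of elements visited, matching
 * num_elems above.
 */
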
static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size = array->elem_size;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*array);

	if (percpu) {
		usage += entries * sizeof(void *);
		usage += entries * elem_size * num_possible_cpus();
	} else {
		if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
	}
	return usage;
}

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers_wq,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_gen_lookup = percpu_array_map_gen_lookup,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;
}

static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
		return 0;
	} else {
		return -ENOENT;
	}
}

static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	return __fd_array_map_delete_elem(map, key, true);
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* bpf_prog is freed after one RCU or tasks trace grace period */
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

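/* Illustrative note (added comment, not part of the original source): a
 * BPF_MAP_TYPE_PROG_ARRAY is populated from user space with program fds,
 * e.g. bpf_map_update_elem(map_fd, &idx, &prog_fd, BPF_ANY), and consumed
 * from BPF programs via bpf_tail_call(ctx, &prog_array, idx); an empty or
 * incompatible slot makes the tail call fall through to the next
 * instruction.  "prog_array", "idx" and "prog_fd" are placeholder names.
 */
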
/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		__fd_array_map_delete_elem(map, &i, need_defer);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
				      struct bpf_prog *new, struct bpf_prog *old)
{
	WARN_ON_ONCE(1);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) Programs reaching a refcount of zero while patching
			 *    is in progress are also okay since we're protected
			 *    under poke_mutex and untrack the programs before the
			 *    JIT buffer is freed.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			bpf_arch_poke_desc_update(poke, new, old);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map, true);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* bpf_perf_event is freed after one RCU grace period */
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			__fd_array_map_delete_elem(map, &i, true);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

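/* Illustrative note (added comment, not part of the original source): for a
 * BPF_MAP_TYPE_ARRAY_OF_MAPS, a BPF-side lookup goes through two levels,
 * roughly:
 *
 *	void *inner_map = bpf_map_lookup_elem(&outer_array, &idx);
 *	if (inner_map)
 *		val = bpf_map_lookup_elem(inner_map, &key);
 *
 * "outer_array", "idx" and "key" are placeholder names; the first lookup is
 * what array_of_map_lookup_elem()/array_of_map_gen_lookup() below implement.
 */
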
static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = array->elem_size;
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};