// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/sort.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(slab) (Only on some arches)
 *   5. object_map_lock (Only for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used on arches that do not have the ability
 *   to do a cmpxchg_double. It only protects:
 *
 *	A. slab->freelist	-> List of free objects in a slab
 *	B. slab->inuse		-> Number of objects in use
 *	C. slab->objects	-> Number of objects in slab
 *	D. slab->frozen		-> frozen state
 *
 *   Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is
 *   the cpu slab which is actively allocated from by the processor that
 *   froze it and it is not on any list. The processor that froze the
 *   slab is the one who can perform list operations on the slab. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   slab's freelist.
 *
 *   CPU partial slabs
 *
 *   The partially empty slabs cached on the CPU partial list are used
 *   for performance reasons, which speeds up the allocation process.
 *   These slabs are not frozen, but are also exempt from list management,
 *   by clearing the PG_workingset flag when moving out of the node
 *   partial list. Please see __slab_free() for more details.
 *
 *   To sum up, the current scheme is:
 *   - node partial slab: PG_Workingset && !frozen
 *   - cpu partial slab:  !PG_Workingset && !frozen
 *   - cpu slab:          !PG_Workingset && frozen
 *   - full slab:         !PG_Workingset && !frozen
 *
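 *   As an illustration only (this predicate is not a helper defined at this
 *   point in the file, and slab->frozen stands for the frozen bit in the
 *   slab's counters word), the first row of the table could be read as:
 *
 *	bool on_node_partial_list(struct slab *slab)
 *	{
 *		return folio_test_workingset(slab_folio(slab)) &&
 *		       !slab->frozen;
 *	}
 *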
 *   list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists, nor can the number of partial slabs be
 *   modified. (Note that the total number of slabs is an atomic value
 *   that may be modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   For debug caches, all allocations are forced to go through a list_lock
 *   protected region to serialize against concurrent validation.
 *
 *   cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or interrupted
 *   by an irq. Fast path operations rely on lockless operations instead.
 *
 *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
 *   which means the lockless fastpath cannot be used as it might interfere with
 *   an in-progress slow path operation. In this case the local lock is always
 *   taken but it still utilizes the freelist for the common operations.
 *
 *   lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu.
 *
 *   irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 *   doesn't have to be revalidated in each section protected by the local lock.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * slab->frozen		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

/*
 * We could simply use migrate_disable()/enable() but as long as it's a
 * function call even on !PREEMPT_RT, use inline preempt_disable() there.
 */
#ifndef CONFIG_PREEMPT_RT
#define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
#define USE_LOCKLESS_FAST_PATH()	(true)
#else
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#define USE_LOCKLESS_FAST_PATH()	(false)
#endif
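
/*
 * Usage sketch (illustrative; a simplified caller, not an API of this file):
 * the two wrappers above are always paired, keeping the returned percpu
 * pointer valid in between by disabling preemption on !PREEMPT_RT and only
 * migration on PREEMPT_RT:
 *
 *	struct kmem_cache_cpu *c;
 *
 *	c = slub_get_cpu_ptr(s->cpu_slab);
 *	... use c, e.g. under the cpu_slab->lock local lock ...
 *	slub_put_cpu_ptr(s->cpu_slab);
 */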

#ifndef CONFIG_SLUB_TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif	/* CONFIG_SLUB_DEBUG */

/* Structure holding parameters for get_partial() call chain */
struct partial_context {
	gfp_t flags;
	unsigned int orig_size;
	void *object;
};

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

static inline bool slub_debug_orig_size(struct kmem_cache *s)
{
	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
			(s->flags & SLAB_KMALLOC));
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

#ifndef CONFIG_SLUB_TINY
/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10
#else
#define MIN_PARTIAL 0
#define MAX_PARTIAL 0
#endif

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information.
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slab_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
/* Use cmpxchg_double */

#ifdef system_has_freelist_aba
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
#else
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};

#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	union {
		struct {
			void **freelist;	/* Pointer to next available object */
			unsigned long tid;	/* Globally unique transaction id */
		};
		freelist_aba_t freelist_tid;
	};
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

static inline
void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_add(s->cpu_slab->stat[si], v);
#endif
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
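
/*
 * Usage sketch (illustrative, not a helper defined here): summing the node
 * partial-slab counts of a cache with the iterator above.
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */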

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

#ifndef CONFIG_SLUB_TINY
/*
 * Workqueue used for flush_cpu_slab().
 */
static struct workqueue_struct *flushwq;
#endif

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
					    void *ptr, unsigned long ptr_addr)
{
	unsigned long encoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
#else
	encoded = (unsigned long)ptr;
#endif
	return (freeptr_t){.v = encoded};
}

static inline void *freelist_ptr_decode(const struct kmem_cache *s,
					freeptr_t ptr, unsigned long ptr_addr)
{
	void *decoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
#else
	decoded = (void *)ptr.v;
#endif
	return decoded;
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	unsigned long ptr_addr;
	freeptr_t p;

	object = kasan_reset_tag(object);
	ptr_addr = (unsigned long)object + s->offset;
	p = *(freeptr_t *)(ptr_addr);
	return freelist_ptr_decode(s, p, ptr_addr);
}

#ifndef CONFIG_SLUB_TINY
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetchw(object + s->offset);
}
#endif

/*
 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
 * pointer value in the case the current thread loses the race for the next
 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
 * KMSAN will still check all arguments of cmpxchg because of imperfect
 * handling of inline assembly.
 * To work around this problem, we apply __no_kmsan_checks to ensure that
 * get_freepointer_safe() returns initialized memory.
 */
__no_kmsan_checks
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	freeptr_t p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
	return freelist_ptr_decode(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}
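
/*
 * Round-trip sketch (illustrative; the concrete numbers are made up): with
 * CONFIG_SLAB_FREELIST_HARDENED, the word stored in the free-pointer slot at
 * address A is
 *
 *	stored = (unsigned long)next ^ s->random ^ swab(A)
 *
 * so a stored word never looks like a plain kernel pointer, and decoding it
 * with the wrong slot address yields garbage:
 *
 *	unsigned long A = (unsigned long)object + s->offset;
 *	freeptr_t stored = freelist_ptr_encode(s, next, A);
 *
 *	freelist_ptr_decode(s, stored, A);	decodes back to next
 *	freelist_ptr_decode(s, stored, A + 8);	useless to an attacker
 */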

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
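
/*
 * Worked example (illustrative, assuming 4K pages): a 256-byte object in an
 * order-1 (8K) slab packs as
 *
 *	order_objects(1, 256)	== 8192 / 256		== 32
 *	oo_make(1, 256).x	== (1 << 16) + 32	== 0x10020
 *	oo_order(x)		== 0x10020 >> 16	== 1
 *	oo_objects(x)		== 0x10020 & 0xffff	== 32
 *
 * i.e. the order lives above OO_SHIFT and the object count sits in the low
 * bits of the same word.
 */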

#ifdef CONFIG_SLUB_CPU_PARTIAL
static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
	unsigned int nr_slabs;

	s->cpu_partial = nr_objects;

	/*
	 * We take the number of objects but actually limit the number of
	 * slabs on the per cpu partial list, in order to limit excessive
	 * growth of the list. For simplicity we assume that the slabs will
	 * be half-full.
	 */
	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
	s->cpu_partial_slabs = nr_slabs;
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return s->cpu_partial_slabs;
}
#else
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct slab *slab)
{
	bit_spin_lock(PG_locked, &slab->__page_flags);
}

static __always_inline void slab_unlock(struct slab *slab)
{
	bit_spin_unlock(PG_locked, &slab->__page_flags);
}

static inline bool
__update_freelist_fast(struct slab *slab,
		      void *freelist_old, unsigned long counters_old,
		      void *freelist_new, unsigned long counters_new)
{
#ifdef system_has_freelist_aba
	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };

	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
#else
	return false;
#endif
}

static inline bool
__update_freelist_slow(struct slab *slab,
		      void *freelist_old, unsigned long counters_old,
		      void *freelist_new, unsigned long counters_new)
{
	bool ret = false;

	slab_lock(slab);
	if (slab->freelist == freelist_old &&
	    slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ret = true;
	}
	slab_unlock(slab);

	return ret;
}

/*
 * Interrupts must be disabled (for the fallback code to work right), typically
 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
 * allocation/free operation in hardirq context. Therefore nothing can
 * interrupt the operation.
 */
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	bool ret;

	if (USE_LOCKLESS_FAST_PATH())
		lockdep_assert_irqs_disabled();

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	} else {
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	bool ret;

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					     freelist_new, counters_new);
		local_irq_restore(flags);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
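
/*
 * Illustrative transition (values hypothetical): a caller wanting to grab the
 * entire freelist of a slab with objects == 32 and inuse == 20 would pass
 *
 *	old: freelist == slab->freelist, counters encoding inuse == 20
 *	new: freelist == NULL,           counters encoding inuse == 32
 *
 * and slab_update_freelist() succeeds only if neither word changed in the
 * meantime; on failure the caller re-reads freelist and counters and retries.
 */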

/*
 * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
 * family will round up the real request size to these fixed ones, so
 * there can be extra space beyond what was requested. Save the original
 * request size in the metadata area, for better debugging and sanity checks.
 */
static inline void set_orig_size(struct kmem_cache *s,
				void *object, unsigned int orig_size)
{
	void *p = kasan_reset_tag(object);
	unsigned int kasan_meta_size;

	if (!slub_debug_orig_size(s))
		return;

	/*
	 * KASAN can save its free meta data inside of the object at offset 0.
	 * If this meta data size is larger than 'orig_size', it will overlap
	 * the data redzone in [orig_size+1, object_size]. Thus, we adjust
	 * 'orig_size' to be at least as big as KASAN's meta data.
	 */
	kasan_meta_size = kasan_metadata_size(s, true);
	if (kasan_meta_size > orig_size)
		orig_size = kasan_meta_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	*(unsigned int *)p = orig_size;
}

static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
	void *p = kasan_reset_tag(object);

	if (!slub_debug_orig_size(s))
		return s->object_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	return *(unsigned int *)p;
}

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
		       struct slab *slab)
{
	void *addr = slab_address(slab);
	void *p;

	bitmap_zero(obj_map, slab->objects);

	for (p = slab->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), obj_map);
}
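
/*
 * Usage sketch (illustrative): after __fill_map() a set bit means "object is
 * on the freelist", so allocated objects can be visited like this (assuming
 * obj_map provides at least slab->objects bits and the slab cannot change
 * under us):
 *
 *	void *addr = slab_address(slab);
 *	void *p;
 *
 *	__fill_map(obj_map, s, slab);
 *	for_each_object(p, s, addr, slab->objects)
 *		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
 *			... p is a live (allocated) object ...
 */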

#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}

static bool slab_in_kunit_test(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
static inline bool slab_in_kunit_test(void) { return false; }
#endif

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
	kmsan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kmsan_enable_current();
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct slab *slab, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = slab_address(slab);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + slab->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}
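
/*
 * Example (illustrative, addresses made up): in a slab at base B holding 32
 * objects of s->size == 256, the only valid object addresses are B, B + 256,
 * ..., B + 31 * 256; a pointer like B + 260 fails the modulo test above and
 * B + 32 * 256 fails the range test.
 */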

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t handle;
	unsigned long entries[TRACK_ADDRS_COUNT];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(void)
{
	return 0;
}
#endif

static void set_track_update(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr,
			     depot_stack_handle_t handle)
{
	struct track *p = get_track(s, object, alloc);

#ifdef CONFIG_STACKDEPOT
	p->handle = handle;
#endif
	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
}

static __always_inline void set_track(struct kmem_cache *s, void *object,
				      enum track_item alloc, unsigned long addr)
{
	depot_stack_handle_t handle = set_track_prepare();

	set_track_update(s, object, alloc, addr, handle);
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	struct track *p;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	p = get_track(s, object, TRACK_ALLOC);
	memset(p, 0, 2*sizeof(struct track));
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	depot_stack_handle_t handle __maybe_unused;

	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(t->handle);
	if (handle)
		stack_depot_print(handle);
	else
		pr_err("object allocation/free stack trace missing\n");
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_slab_info(const struct slab *slab)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
	       slab, slab->objects, slab->inuse, slab->freelist,
	       &slab->__page_flags);
}

void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = slab_address(slab);

	print_tracking(s, p);

	print_slab_info(slab);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			      s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (slub_debug_orig_size(s))
		off += sizeof(unsigned int);

	off += kasan_metadata_size(s, false);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct slab *slab,
			u8 *object, char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, "%s", reason);
	print_trailer(s, slab, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, slab, nextfree) && freelist) {
		object_err(s, slab, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_slab_info(slab);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);
	unsigned int poison_size = s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		/*
		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
		 * the shadow makes it possible to distinguish uninit-value
		 * from use-after-free.
		 */
		memset_no_sanitize_memory(p - s->red_left_pad, val,
					  s->red_left_pad);

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			/*
			 * Redzone the space kmalloc allocated beyond the
			 * original request size; the poisoning below is then
			 * limited to the original request size.
			 */
			poison_size = get_orig_size(s, object);
		}
	}

	if (s->flags & __OBJECT_POISON) {
		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
	}

	if (s->flags & SLAB_RED_ZONE)
		memset_no_sanitize_memory(p + poison_size, val,
					  s->inuse - poison_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

#ifdef CONFIG_KMSAN
#define pad_check_attributes noinline __no_kmsan_checks
#else
#define pad_check_attributes
#endif

static pad_check_attributes int
check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
		       u8 *object, char *what,
		       u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = slab_address(slab);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	slab_bug(s, "%s overwritten", what);
	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
 * 	0xcc (SLUB_RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
 *	D. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
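
/*
 * Worked instance of the layout above (illustrative; exact offsets depend on
 * the architecture and cache configuration, assuming a 64-bit kernel): a
 * 24-byte kmalloc object with red zoning and SLAB_STORE_USER might lay out as
 *
 *	[0, 24)		the object (0x6b poison, last byte 0xa5 when free)
 *	[24, 32)	right redzone (0xbb inactive / 0xcc active)
 *	[32, 40)	free pointer, outside the object
 *	from 40		two struct track records, then the original kmalloc
 *			request size, then 0x5a padding up to s->size
 *
 * with a further left redzone of s->red_left_pad bytes before offset 0.
 */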

static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER) {
		/* We also have user information there */
		off += 2 * sizeof(struct track);

		if (s->flags & SLAB_KMALLOC)
			off += sizeof(unsigned int);
	}

	off += kasan_metadata_size(s, false);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, slab, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static pad_check_attributes void
slab_pad_check(struct kmem_cache *s, struct slab *slab)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return;

	start = slab_address(slab);
	length = slab_size(slab);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}

static int check_object(struct kmem_cache *s, struct slab *slab,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;
	unsigned int orig_size, kasan_meta_size;
	int ret = 1;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			ret = 0;

		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size))
			ret = 0;

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			orig_size = get_orig_size(s, object);

			if (s->object_size > orig_size &&
			    !check_bytes_and_report(s, slab, object,
					"kmalloc Redzone", p + orig_size,
					val, s->object_size - orig_size)) {
				ret = 0;
			}
		}
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size))
				ret = 0;
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
			/*
			 * KASAN can save its free meta data inside of the
			 * object at offset 0. Thus, skip checking the part of
			 * the redzone that overlaps with the meta data.
			 */
			kasan_meta_size = kasan_metadata_size(s, true);
			if (kasan_meta_size < s->object_size - 1 &&
			    !check_bytes_and_report(s, slab, p, "Poison",
					p + kasan_meta_size, POISON_FREE,
					s->object_size - kasan_meta_size - 1))
				ret = 0;
			if (kasan_meta_size < s->object_size &&
			    !check_bytes_and_report(s, slab, p, "End Poison",
					p + s->object_size - 1, POISON_END, 1))
				ret = 0;
		}
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		if (!check_pad_bytes(s, slab, p))
			ret = 0;
	}

	/*
	 * Cannot check freepointer while object is allocated if
	 * object and freepointer overlap.
	 */
	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
		object_err(s, slab, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		ret = 0;
	}

	if (!ret && !slab_in_kunit_test()) {
		print_trailer(s, slab, object);
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	}

	return ret;
}

static int check_slab(struct kmem_cache *s, struct slab *slab)
{
	int maxobj;

	if (!folio_test_slab(slab_folio(slab))) {
		slab_err(s, slab, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(slab_order(slab), s->size);
	if (slab->objects > maxobj) {
		slab_err(s, slab, "objects %u > max %u",
			slab->objects, maxobj);
		return 0;
	}
	if (slab->inuse > slab->objects) {
		slab_err(s, slab, "inuse %u > max %u",
			slab->inuse, slab->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, slab);
	return 1;
}

/*
 * Determine if a certain object in a slab is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = slab->freelist;
	while (fp && nr <= slab->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, slab, fp)) {
			if (object) {
				object_err(s, slab, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, slab, "Freepointer corrupt");
				slab->freelist = NULL;
				slab->inuse = slab->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(slab_order(slab), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (slab->objects != max_objects) {
		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
			 slab->objects, max_objects);
		slab->objects = max_objects;
		slab_fix(s, "Number of objects adjusted");
	}
	if (slab->inuse != slab->objects - nr) {
		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
			 slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;
		slab_fix(s, "Object count adjusted");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct slab *slab, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, slab->inuse,
			slab->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&slab->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct slab *slab, void *object)
{
	if (!check_slab(s, slab))
		return 0;

	if (!check_valid_pointer(s, slab, object)) {
		object_err(s, slab, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline bool alloc_debug_processing(struct kmem_cache *s,
			struct slab *slab, void *object, int orig_size)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, slab, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	trace(s, slab, object, 1);
	set_orig_size(s, object, orig_size);
	init_object(s, object, SLUB_RED_ACTIVE);
	return true;

bad:
	if (folio_test_slab(slab_folio(slab))) {
		/*
		 * If this is a slab page then let's do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		slab->inuse = slab->objects;
		slab->freelist = NULL;
	}
	return false;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct slab *slab, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, slab, object)) {
		slab_err(s, slab, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, slab, object)) {
		object_err(s, slab, object, "Object already free");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != slab->slab_cache)) {
		if (!folio_test_slab(slab_folio(slab))) {
			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!slab->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, slab, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/*
 * Parse a block of slab_debug options. Blocks are delimited by ';'
 *
 * @str:    start of block
 * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs:  return start of list of slabs, or NULL when there's no list
 * @init:   assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			if (init)
				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}
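
/*
 * Example boot strings accepted by the parser above (illustrative):
 *
 *	slab_debug=FZ			sanity checks + red zoning, all caches
 *	slab_debug=,dentry		full debugging, only the dentry cache
 *	slab_debug=F,kmalloc-64;Z,dentry
 *					two ';'-separated blocks, each with
 *					its own flags and slab list
 */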

static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	slab_flags_t global_flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	global_flags = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			global_flags = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
			if (flags & SLAB_STORE_USER)
				stack_depot_request_early_init();
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			global_flags = slub_debug;
		slub_debug_string = saved_str;
	}
out:
	slub_debug = global_flags;
	if (slub_debug & SLAB_STORE_USER)
		stack_depot_request_early_init();
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	else
		static_branch_disable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slab_debug", setup_slub_debug);
__setup_param("slub_debug", slub_debug, setup_slub_debug, 0);

/*
 * kmem_cache_flags - apply debugging options to the cache
 * @flags:	flags to set
 * @name:	name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the select slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		slub_debug_local &= ~SLAB_STORE_USER;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	return flags | slub_debug_local;
}
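
/*
 * Matching sketch (illustrative): with slab_debug=Z,kmalloc-*;F,dentry a
 * cache named "kmalloc-64" picks up SLAB_RED_ZONE from the first block via
 * the '*' prefix glob, "dentry" gets SLAB_CONSISTENCY_CHECKS from the second
 * block, and any other cache falls through to the global slub_debug flags.
 */
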
slab->obj_exts = OBJEXTS_ALLOC_FAIL; 1929 } 1930 1931 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 1932 struct slabobj_ext *vec, unsigned int objects) 1933 { 1934 /* 1935 * If vector previously failed to allocate then we have live 1936 * objects with no tag reference. Mark all references in this 1937 * vector as empty to avoid warnings later on. 1938 */ 1939 if (obj_exts & OBJEXTS_ALLOC_FAIL) { 1940 unsigned int i; 1941 1942 for (i = 0; i < objects; i++) 1943 set_codetag_empty(&vec[i].ref); 1944 } 1945 } 1946 1947 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 1948 1949 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} 1950 static inline void mark_failed_objexts_alloc(struct slab *slab) {} 1951 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 1952 struct slabobj_ext *vec, unsigned int objects) {} 1953 1954 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 1955 1956 /* 1957 * The allocated objcg pointers array is not accounted directly. 1958 * Moreover, it should not come from DMA buffer and is not readily 1959 * reclaimable. So those GFP bits should be masked off. 1960 */ 1961 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ 1962 __GFP_ACCOUNT | __GFP_NOFAIL) 1963 1964 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 1965 gfp_t gfp, bool new_slab) 1966 { 1967 unsigned int objects = objs_per_slab(s, slab); 1968 unsigned long new_exts; 1969 unsigned long old_exts; 1970 struct slabobj_ext *vec; 1971 1972 gfp &= ~OBJCGS_CLEAR_MASK; 1973 /* Prevent recursive extension vector allocation */ 1974 gfp |= __GFP_NO_OBJ_EXT; 1975 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, 1976 slab_nid(slab)); 1977 if (!vec) { 1978 /* Mark vectors which failed to allocate */ 1979 if (new_slab) 1980 mark_failed_objexts_alloc(slab); 1981 1982 return -ENOMEM; 1983 } 1984 1985 new_exts = (unsigned long)vec; 1986 #ifdef CONFIG_MEMCG 1987 new_exts |= MEMCG_DATA_OBJEXTS; 1988 #endif 1989 old_exts = READ_ONCE(slab->obj_exts); 1990 handle_failed_objexts_alloc(old_exts, vec, objects); 1991 if (new_slab) { 1992 /* 1993 * If the slab is brand new and nobody can yet access its 1994 * obj_exts, no synchronization is required and obj_exts can 1995 * be simply assigned. 1996 */ 1997 slab->obj_exts = new_exts; 1998 } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || 1999 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { 2000 /* 2001 * If the slab is already in use, somebody can allocate and 2002 * assign slabobj_exts in parallel. In this case the existing 2003 * objcg vector should be reused. 2004 */ 2005 mark_objexts_empty(vec); 2006 kfree(vec); 2007 return 0; 2008 } 2009 2010 kmemleak_not_leak(vec); 2011 return 0; 2012 } 2013 2014 static inline void free_slab_obj_exts(struct slab *slab) 2015 { 2016 struct slabobj_ext *obj_exts; 2017 2018 obj_exts = slab_obj_exts(slab); 2019 if (!obj_exts) 2020 return; 2021 2022 /* 2023 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its 2024 * corresponding extension will be NULL. alloc_tag_sub() will throw a 2025 * warning if slab has extensions but the extension of an object is 2026 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that 2027 * the extension for obj_exts is expected to be NULL. 
2028 */ 2029 mark_objexts_empty(obj_exts); 2030 kfree(obj_exts); 2031 slab->obj_exts = 0; 2032 } 2033 2034 static inline bool need_slab_obj_ext(void) 2035 { 2036 if (mem_alloc_profiling_enabled()) 2037 return true; 2038 2039 /* 2040 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally 2041 * inside memcg_slab_post_alloc_hook. No other users for now. 2042 */ 2043 return false; 2044 } 2045 2046 #else /* CONFIG_SLAB_OBJ_EXT */ 2047 2048 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, 2049 gfp_t gfp, bool new_slab) 2050 { 2051 return 0; 2052 } 2053 2054 static inline void free_slab_obj_exts(struct slab *slab) 2055 { 2056 } 2057 2058 static inline bool need_slab_obj_ext(void) 2059 { 2060 return false; 2061 } 2062 2063 #endif /* CONFIG_SLAB_OBJ_EXT */ 2064 2065 #ifdef CONFIG_MEM_ALLOC_PROFILING 2066 2067 static inline struct slabobj_ext * 2068 prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2069 { 2070 struct slab *slab; 2071 2072 if (!p) 2073 return NULL; 2074 2075 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2076 return NULL; 2077 2078 if (flags & __GFP_NO_OBJ_EXT) 2079 return NULL; 2080 2081 slab = virt_to_slab(p); 2082 if (!slab_obj_exts(slab) && 2083 WARN(alloc_slab_obj_exts(slab, s, flags, false), 2084 "%s, %s: Failed to create slab extension vector!\n", 2085 __func__, s->name)) 2086 return NULL; 2087 2088 return slab_obj_exts(slab) + obj_to_index(s, slab, p); 2089 } 2090 2091 static inline void 2092 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2093 { 2094 if (need_slab_obj_ext()) { 2095 struct slabobj_ext *obj_exts; 2096 2097 obj_exts = prepare_slab_obj_exts_hook(s, flags, object); 2098 /* 2099 * Currently obj_exts is used only for allocation profiling. 2100 * If other users appear then mem_alloc_profiling_enabled() 2101 * check should be added before alloc_tag_add(). 2102 */ 2103 if (likely(obj_exts)) 2104 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); 2105 } 2106 } 2107 2108 static inline void 2109 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2110 int objects) 2111 { 2112 struct slabobj_ext *obj_exts; 2113 int i; 2114 2115 if (!mem_alloc_profiling_enabled()) 2116 return; 2117 2118 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. 
*/ 2119 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) 2120 return; 2121 2122 obj_exts = slab_obj_exts(slab); 2123 if (!obj_exts) 2124 return; 2125 2126 for (i = 0; i < objects; i++) { 2127 unsigned int off = obj_to_index(s, slab, p[i]); 2128 2129 alloc_tag_sub(&obj_exts[off].ref, s->size); 2130 } 2131 } 2132 2133 #else /* CONFIG_MEM_ALLOC_PROFILING */ 2134 2135 static inline void 2136 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2137 { 2138 } 2139 2140 static inline void 2141 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2142 int objects) 2143 { 2144 } 2145 2146 #endif /* CONFIG_MEM_ALLOC_PROFILING */ 2147 2148 2149 #ifdef CONFIG_MEMCG 2150 2151 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object); 2152 2153 static __fastpath_inline 2154 bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 2155 gfp_t flags, size_t size, void **p) 2156 { 2157 if (likely(!memcg_kmem_online())) 2158 return true; 2159 2160 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))) 2161 return true; 2162 2163 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p))) 2164 return true; 2165 2166 if (likely(size == 1)) { 2167 memcg_alloc_abort_single(s, *p); 2168 *p = NULL; 2169 } else { 2170 kmem_cache_free_bulk(s, size, p); 2171 } 2172 2173 return false; 2174 } 2175 2176 static __fastpath_inline 2177 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2178 int objects) 2179 { 2180 struct slabobj_ext *obj_exts; 2181 2182 if (!memcg_kmem_online()) 2183 return; 2184 2185 obj_exts = slab_obj_exts(slab); 2186 if (likely(!obj_exts)) 2187 return; 2188 2189 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); 2190 } 2191 #else /* CONFIG_MEMCG */ 2192 static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s, 2193 struct list_lru *lru, 2194 gfp_t flags, size_t size, 2195 void **p) 2196 { 2197 return true; 2198 } 2199 2200 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 2201 void **p, int objects) 2202 { 2203 } 2204 #endif /* CONFIG_MEMCG */ 2205 2206 /* 2207 * Hooks for other subsystems that check memory allocations. In a typical 2208 * production configuration these hooks all should produce no code at all. 2209 * 2210 * Returns true if freeing of the object can proceed, false if its reuse 2211 * was delayed by KASAN quarantine, or it was returned to KFENCE. 2212 */ 2213 static __always_inline 2214 bool slab_free_hook(struct kmem_cache *s, void *x, bool init) 2215 { 2216 kmemleak_free_recursive(x, s->flags); 2217 kmsan_slab_free(s, x); 2218 2219 debug_check_no_locks_freed(x, s->object_size); 2220 2221 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 2222 debug_check_no_obj_freed(x, s->object_size); 2223 2224 /* Use KCSAN to help debug racy use-after-free. */ 2225 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) 2226 __kcsan_check_access(x, s->object_size, 2227 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); 2228 2229 if (kfence_free(x)) 2230 return false; 2231 2232 /* 2233 * As memory initialization might be integrated into KASAN, 2234 * kasan_slab_free and initialization memset's must be 2235 * kept together to avoid discrepancies in behavior. 2236 * 2237 * The initialization memset's clear the object and the metadata, 2238 * but don't touch the SLAB redzone. 2239 * 2240 * The object's freepointer is also avoided if stored outside the 2241 * object. 
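	 *
	 * As a rough sketch of what the two memset() calls below cover
	 * (boundaries depend on the cache's debug layout):
	 *
	 *   [x, x + orig_size)                object data, zeroed
	 *   [x + inuse, x + s->size - rsize)  tracking/padding, zeroed
	 *   everything else                   redzones, left intact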
2242 */ 2243 if (unlikely(init)) { 2244 int rsize; 2245 unsigned int inuse, orig_size; 2246 2247 inuse = get_info_end(s); 2248 orig_size = get_orig_size(s, x); 2249 if (!kasan_has_integrated_init()) 2250 memset(kasan_reset_tag(x), 0, orig_size); 2251 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; 2252 memset((char *)kasan_reset_tag(x) + inuse, 0, 2253 s->size - inuse - rsize); 2254 /* 2255 * Restore orig_size, otherwize kmalloc redzone overwritten 2256 * would be reported 2257 */ 2258 set_orig_size(s, x, orig_size); 2259 2260 } 2261 /* KASAN might put x into memory quarantine, delaying its reuse. */ 2262 return !kasan_slab_free(s, x, init); 2263 } 2264 2265 static __fastpath_inline 2266 bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail, 2267 int *cnt) 2268 { 2269 2270 void *object; 2271 void *next = *head; 2272 void *old_tail = *tail; 2273 bool init; 2274 2275 if (is_kfence_address(next)) { 2276 slab_free_hook(s, next, false); 2277 return false; 2278 } 2279 2280 /* Head and tail of the reconstructed freelist */ 2281 *head = NULL; 2282 *tail = NULL; 2283 2284 init = slab_want_init_on_free(s); 2285 2286 do { 2287 object = next; 2288 next = get_freepointer(s, object); 2289 2290 /* If object's reuse doesn't have to be delayed */ 2291 if (likely(slab_free_hook(s, object, init))) { 2292 /* Move object to the new freelist */ 2293 set_freepointer(s, object, *head); 2294 *head = object; 2295 if (!*tail) 2296 *tail = object; 2297 } else { 2298 /* 2299 * Adjust the reconstructed freelist depth 2300 * accordingly if object's reuse is delayed. 2301 */ 2302 --(*cnt); 2303 } 2304 } while (object != old_tail); 2305 2306 return *head != NULL; 2307 } 2308 2309 static void *setup_object(struct kmem_cache *s, void *object) 2310 { 2311 setup_object_debug(s, object); 2312 object = kasan_init_slab_obj(s, object); 2313 if (unlikely(s->ctor)) { 2314 kasan_unpoison_new_object(s, object); 2315 s->ctor(object); 2316 kasan_poison_new_object(s, object); 2317 } 2318 return object; 2319 } 2320 2321 /* 2322 * Slab allocation and freeing 2323 */ 2324 static inline struct slab *alloc_slab_page(gfp_t flags, int node, 2325 struct kmem_cache_order_objects oo) 2326 { 2327 struct folio *folio; 2328 struct slab *slab; 2329 unsigned int order = oo_order(oo); 2330 2331 folio = (struct folio *)alloc_pages_node(node, flags, order); 2332 if (!folio) 2333 return NULL; 2334 2335 slab = folio_slab(folio); 2336 __folio_set_slab(folio); 2337 /* Make the flag visible before any changes to folio->mapping */ 2338 smp_wmb(); 2339 if (folio_is_pfmemalloc(folio)) 2340 slab_set_pfmemalloc(slab); 2341 2342 return slab; 2343 } 2344 2345 #ifdef CONFIG_SLAB_FREELIST_RANDOM 2346 /* Pre-initialize the random sequence cache */ 2347 static int init_cache_random_seq(struct kmem_cache *s) 2348 { 2349 unsigned int count = oo_objects(s->oo); 2350 int err; 2351 2352 /* Bailout if already initialised */ 2353 if (s->random_seq) 2354 return 0; 2355 2356 err = cache_random_seq_create(s, count, GFP_KERNEL); 2357 if (err) { 2358 pr_err("SLUB: Unable to initialize free list for %s\n", 2359 s->name); 2360 return err; 2361 } 2362 2363 /* Transform to an offset on the set of pages */ 2364 if (s->random_seq) { 2365 unsigned int i; 2366 2367 for (i = 0; i < count; i++) 2368 s->random_seq[i] *= s->size; 2369 } 2370 return 0; 2371 } 2372 2373 /* Initialize each random sequence freelist per cache */ 2374 static void __init init_freelist_randomization(void) 2375 { 2376 struct kmem_cache *s; 2377 2378 mutex_lock(&slab_mutex); 2379 2380 
	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}

/* Get the next entry from the pre-computed randomized freelist */
static void *next_freelist_entry(struct kmem_cache *s,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}

/* Shuffle the singly linked freelist based on a pre-computed random sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	if (slab->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	pos = get_random_u32_below(freelist_count);

	page_limit = slab->objects * s->size;
	start = fixup_red_left(s, slab_address(slab));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
	cur = setup_object(s, cur);
	slab->freelist = cur;

	for (idx = 1; idx < slab->objects; idx++) {
		next = next_freelist_entry(s, &pos, start, page_limit,
					   freelist_count);
		next = setup_object(s, next);
		set_freepointer(s, cur, next);
		cur = next;
	}
	set_freepointer(s, cur, NULL);

	return true;
}
#else
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		alloc_slab_obj_exts(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online() || need_slab_obj_ext())
		free_slab_obj_exts(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct slab *slab;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p, *next;
	int idx;
	bool shuffle;

	flags &= gfp_allowed_mask;

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall back to the minimum order allocation.
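	 * As an example (orders purely illustrative): a cache with a default
	 * order of 3 first attempts an order-3 page with __GFP_NORETRY and
	 * without warnings; only if that fails do we retry below at the
	 * cache's minimum order using the caller's original flags.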
2489 */ 2490 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; 2491 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) 2492 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM; 2493 2494 slab = alloc_slab_page(alloc_gfp, node, oo); 2495 if (unlikely(!slab)) { 2496 oo = s->min; 2497 alloc_gfp = flags; 2498 /* 2499 * Allocation may have failed due to fragmentation. 2500 * Try a lower order alloc if possible 2501 */ 2502 slab = alloc_slab_page(alloc_gfp, node, oo); 2503 if (unlikely(!slab)) 2504 return NULL; 2505 stat(s, ORDER_FALLBACK); 2506 } 2507 2508 slab->objects = oo_objects(oo); 2509 slab->inuse = 0; 2510 slab->frozen = 0; 2511 2512 account_slab(slab, oo_order(oo), s, flags); 2513 2514 slab->slab_cache = s; 2515 2516 kasan_poison_slab(slab); 2517 2518 start = slab_address(slab); 2519 2520 setup_slab_debug(s, slab, start); 2521 2522 shuffle = shuffle_freelist(s, slab); 2523 2524 if (!shuffle) { 2525 start = fixup_red_left(s, start); 2526 start = setup_object(s, start); 2527 slab->freelist = start; 2528 for (idx = 0, p = start; idx < slab->objects - 1; idx++) { 2529 next = p + s->size; 2530 next = setup_object(s, next); 2531 set_freepointer(s, p, next); 2532 p = next; 2533 } 2534 set_freepointer(s, p, NULL); 2535 } 2536 2537 return slab; 2538 } 2539 2540 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) 2541 { 2542 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 2543 flags = kmalloc_fix_flags(flags); 2544 2545 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); 2546 2547 return allocate_slab(s, 2548 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); 2549 } 2550 2551 static void __free_slab(struct kmem_cache *s, struct slab *slab) 2552 { 2553 struct folio *folio = slab_folio(slab); 2554 int order = folio_order(folio); 2555 int pages = 1 << order; 2556 2557 __slab_clear_pfmemalloc(slab); 2558 folio->mapping = NULL; 2559 /* Make the mapping reset visible before clearing the flag */ 2560 smp_wmb(); 2561 __folio_clear_slab(folio); 2562 mm_account_reclaimed_pages(pages); 2563 unaccount_slab(slab, order, s); 2564 __free_pages(&folio->page, order); 2565 } 2566 2567 static void rcu_free_slab(struct rcu_head *h) 2568 { 2569 struct slab *slab = container_of(h, struct slab, rcu_head); 2570 2571 __free_slab(slab->slab_cache, slab); 2572 } 2573 2574 static void free_slab(struct kmem_cache *s, struct slab *slab) 2575 { 2576 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { 2577 void *p; 2578 2579 slab_pad_check(s, slab); 2580 for_each_object(p, s, slab_address(slab), slab->objects) 2581 check_object(s, slab, p, SLUB_RED_INACTIVE); 2582 } 2583 2584 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) 2585 call_rcu(&slab->rcu_head, rcu_free_slab); 2586 else 2587 __free_slab(s, slab); 2588 } 2589 2590 static void discard_slab(struct kmem_cache *s, struct slab *slab) 2591 { 2592 dec_slabs_node(s, slab_nid(slab), slab->objects); 2593 free_slab(s, slab); 2594 } 2595 2596 /* 2597 * SLUB reuses PG_workingset bit to keep track of whether it's on 2598 * the per-node partial list. 
2599 */ 2600 static inline bool slab_test_node_partial(const struct slab *slab) 2601 { 2602 return folio_test_workingset(slab_folio(slab)); 2603 } 2604 2605 static inline void slab_set_node_partial(struct slab *slab) 2606 { 2607 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2608 } 2609 2610 static inline void slab_clear_node_partial(struct slab *slab) 2611 { 2612 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0)); 2613 } 2614 2615 /* 2616 * Management of partially allocated slabs. 2617 */ 2618 static inline void 2619 __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail) 2620 { 2621 n->nr_partial++; 2622 if (tail == DEACTIVATE_TO_TAIL) 2623 list_add_tail(&slab->slab_list, &n->partial); 2624 else 2625 list_add(&slab->slab_list, &n->partial); 2626 slab_set_node_partial(slab); 2627 } 2628 2629 static inline void add_partial(struct kmem_cache_node *n, 2630 struct slab *slab, int tail) 2631 { 2632 lockdep_assert_held(&n->list_lock); 2633 __add_partial(n, slab, tail); 2634 } 2635 2636 static inline void remove_partial(struct kmem_cache_node *n, 2637 struct slab *slab) 2638 { 2639 lockdep_assert_held(&n->list_lock); 2640 list_del(&slab->slab_list); 2641 slab_clear_node_partial(slab); 2642 n->nr_partial--; 2643 } 2644 2645 /* 2646 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a 2647 * slab from the n->partial list. Remove only a single object from the slab, do 2648 * the alloc_debug_processing() checks and leave the slab on the list, or move 2649 * it to full list if it was the last free object. 2650 */ 2651 static void *alloc_single_from_partial(struct kmem_cache *s, 2652 struct kmem_cache_node *n, struct slab *slab, int orig_size) 2653 { 2654 void *object; 2655 2656 lockdep_assert_held(&n->list_lock); 2657 2658 object = slab->freelist; 2659 slab->freelist = get_freepointer(s, object); 2660 slab->inuse++; 2661 2662 if (!alloc_debug_processing(s, slab, object, orig_size)) { 2663 remove_partial(n, slab); 2664 return NULL; 2665 } 2666 2667 if (slab->inuse == slab->objects) { 2668 remove_partial(n, slab); 2669 add_full(s, n, slab); 2670 } 2671 2672 return object; 2673 } 2674 2675 /* 2676 * Called only for kmem_cache_debug() caches to allocate from a freshly 2677 * allocated slab. Allocate a single object instead of whole freelist 2678 * and put the slab to the partial (or full) list. 2679 */ 2680 static void *alloc_single_from_new_slab(struct kmem_cache *s, 2681 struct slab *slab, int orig_size) 2682 { 2683 int nid = slab_nid(slab); 2684 struct kmem_cache_node *n = get_node(s, nid); 2685 unsigned long flags; 2686 void *object; 2687 2688 2689 object = slab->freelist; 2690 slab->freelist = get_freepointer(s, object); 2691 slab->inuse = 1; 2692 2693 if (!alloc_debug_processing(s, slab, object, orig_size)) 2694 /* 2695 * It's not really expected that this would fail on a 2696 * freshly allocated slab, but a concurrent memory 2697 * corruption in theory could cause that. 
2698 */ 2699 return NULL; 2700 2701 spin_lock_irqsave(&n->list_lock, flags); 2702 2703 if (slab->inuse == slab->objects) 2704 add_full(s, n, slab); 2705 else 2706 add_partial(n, slab, DEACTIVATE_TO_HEAD); 2707 2708 inc_slabs_node(s, nid, slab->objects); 2709 spin_unlock_irqrestore(&n->list_lock, flags); 2710 2711 return object; 2712 } 2713 2714 #ifdef CONFIG_SLUB_CPU_PARTIAL 2715 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain); 2716 #else 2717 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, 2718 int drain) { } 2719 #endif 2720 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags); 2721 2722 /* 2723 * Try to allocate a partial slab from a specific node. 2724 */ 2725 static struct slab *get_partial_node(struct kmem_cache *s, 2726 struct kmem_cache_node *n, 2727 struct partial_context *pc) 2728 { 2729 struct slab *slab, *slab2, *partial = NULL; 2730 unsigned long flags; 2731 unsigned int partial_slabs = 0; 2732 2733 /* 2734 * Racy check. If we mistakenly see no partial slabs then we 2735 * just allocate an empty slab. If we mistakenly try to get a 2736 * partial slab and there is none available then get_partial() 2737 * will return NULL. 2738 */ 2739 if (!n || !n->nr_partial) 2740 return NULL; 2741 2742 spin_lock_irqsave(&n->list_lock, flags); 2743 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) { 2744 if (!pfmemalloc_match(slab, pc->flags)) 2745 continue; 2746 2747 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 2748 void *object = alloc_single_from_partial(s, n, slab, 2749 pc->orig_size); 2750 if (object) { 2751 partial = slab; 2752 pc->object = object; 2753 break; 2754 } 2755 continue; 2756 } 2757 2758 remove_partial(n, slab); 2759 2760 if (!partial) { 2761 partial = slab; 2762 stat(s, ALLOC_FROM_PARTIAL); 2763 2764 if ((slub_get_cpu_partial(s) == 0)) { 2765 break; 2766 } 2767 } else { 2768 put_cpu_partial(s, slab, 0); 2769 stat(s, CPU_PARTIAL_NODE); 2770 2771 if (++partial_slabs > slub_get_cpu_partial(s) / 2) { 2772 break; 2773 } 2774 } 2775 } 2776 spin_unlock_irqrestore(&n->list_lock, flags); 2777 return partial; 2778 } 2779 2780 /* 2781 * Get a slab from somewhere. Search in increasing NUMA distances. 2782 */ 2783 static struct slab *get_any_partial(struct kmem_cache *s, 2784 struct partial_context *pc) 2785 { 2786 #ifdef CONFIG_NUMA 2787 struct zonelist *zonelist; 2788 struct zoneref *z; 2789 struct zone *zone; 2790 enum zone_type highest_zoneidx = gfp_zone(pc->flags); 2791 struct slab *slab; 2792 unsigned int cpuset_mems_cookie; 2793 2794 /* 2795 * The defrag ratio allows a configuration of the tradeoffs between 2796 * inter node defragmentation and node local allocations. A lower 2797 * defrag_ratio increases the tendency to do local allocations 2798 * instead of attempting to obtain partial slabs from other nodes. 2799 * 2800 * If the defrag_ratio is set to 0 then kmalloc() always 2801 * returns node local objects. If the ratio is higher then kmalloc() 2802 * may return off node objects because partial slabs are obtained 2803 * from other nodes and filled up. 2804 * 2805 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100 2806 * (which makes defrag_ratio = 1000) then every (well almost) 2807 * allocation will first attempt to defrag slab caches on other nodes. 2808 * This means scanning over all nodes to look for partial slabs which 2809 * may be expensive if we do it every time we are trying to find a slab 2810 * with available objects. 
2811 */ 2812 if (!s->remote_node_defrag_ratio || 2813 get_cycles() % 1024 > s->remote_node_defrag_ratio) 2814 return NULL; 2815 2816 do { 2817 cpuset_mems_cookie = read_mems_allowed_begin(); 2818 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); 2819 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { 2820 struct kmem_cache_node *n; 2821 2822 n = get_node(s, zone_to_nid(zone)); 2823 2824 if (n && cpuset_zone_allowed(zone, pc->flags) && 2825 n->nr_partial > s->min_partial) { 2826 slab = get_partial_node(s, n, pc); 2827 if (slab) { 2828 /* 2829 * Don't check read_mems_allowed_retry() 2830 * here - if mems_allowed was updated in 2831 * parallel, that was a harmless race 2832 * between allocation and the cpuset 2833 * update 2834 */ 2835 return slab; 2836 } 2837 } 2838 } 2839 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 2840 #endif /* CONFIG_NUMA */ 2841 return NULL; 2842 } 2843 2844 /* 2845 * Get a partial slab, lock it and return it. 2846 */ 2847 static struct slab *get_partial(struct kmem_cache *s, int node, 2848 struct partial_context *pc) 2849 { 2850 struct slab *slab; 2851 int searchnode = node; 2852 2853 if (node == NUMA_NO_NODE) 2854 searchnode = numa_mem_id(); 2855 2856 slab = get_partial_node(s, get_node(s, searchnode), pc); 2857 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE))) 2858 return slab; 2859 2860 return get_any_partial(s, pc); 2861 } 2862 2863 #ifndef CONFIG_SLUB_TINY 2864 2865 #ifdef CONFIG_PREEMPTION 2866 /* 2867 * Calculate the next globally unique transaction for disambiguation 2868 * during cmpxchg. The transactions start with the cpu number and are then 2869 * incremented by CONFIG_NR_CPUS. 2870 */ 2871 #define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS) 2872 #else 2873 /* 2874 * No preemption supported therefore also no need to check for 2875 * different cpus. 2876 */ 2877 #define TID_STEP 1 2878 #endif /* CONFIG_PREEMPTION */ 2879 2880 static inline unsigned long next_tid(unsigned long tid) 2881 { 2882 return tid + TID_STEP; 2883 } 2884 2885 #ifdef SLUB_DEBUG_CMPXCHG 2886 static inline unsigned int tid_to_cpu(unsigned long tid) 2887 { 2888 return tid % TID_STEP; 2889 } 2890 2891 static inline unsigned long tid_to_event(unsigned long tid) 2892 { 2893 return tid / TID_STEP; 2894 } 2895 #endif 2896 2897 static inline unsigned int init_tid(int cpu) 2898 { 2899 return cpu; 2900 } 2901 2902 static inline void note_cmpxchg_failure(const char *n, 2903 const struct kmem_cache *s, unsigned long tid) 2904 { 2905 #ifdef SLUB_DEBUG_CMPXCHG 2906 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); 2907 2908 pr_info("%s %s: cmpxchg redo ", n, s->name); 2909 2910 #ifdef CONFIG_PREEMPTION 2911 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid)) 2912 pr_warn("due to cpu change %d -> %d\n", 2913 tid_to_cpu(tid), tid_to_cpu(actual_tid)); 2914 else 2915 #endif 2916 if (tid_to_event(tid) != tid_to_event(actual_tid)) 2917 pr_warn("due to cpu running other code. Event %ld->%ld\n", 2918 tid_to_event(tid), tid_to_event(actual_tid)); 2919 else 2920 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n", 2921 actual_tid, tid, next_tid(tid)); 2922 #endif 2923 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); 2924 } 2925 2926 static void init_kmem_cache_cpus(struct kmem_cache *s) 2927 { 2928 int cpu; 2929 struct kmem_cache_cpu *c; 2930 2931 for_each_possible_cpu(cpu) { 2932 c = per_cpu_ptr(s->cpu_slab, cpu); 2933 local_lock_init(&c->lock); 2934 c->tid = init_tid(cpu); 2935 } 2936 } 2937 2938 /* 2939 * Finishes removing the cpu slab. 
 * Merges cpu's freelist with slab's freelist,
 * unfreezes the slab and puts it on the proper list.
 * Assumes the slab has already been safely taken away from kmem_cache_cpu
 * by the caller.
 */
static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
			    void *freelist)
{
	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
	int free_delta = 0;
	void *nextfree, *freelist_iter, *freelist_tail;
	int tail = DEACTIVATE_TO_HEAD;
	unsigned long flags = 0;
	struct slab new;
	struct slab old;

	if (READ_ONCE(slab->freelist)) {
		stat(s, DEACTIVATE_REMOTE_FREES);
		tail = DEACTIVATE_TO_TAIL;
	}

	/*
	 * Stage one: Count the objects on cpu's freelist as free_delta and
	 * remember the last object in freelist_tail for later splicing.
	 */
	freelist_tail = NULL;
	freelist_iter = freelist;
	while (freelist_iter) {
		nextfree = get_freepointer(s, freelist_iter);

		/*
		 * If 'nextfree' is invalid, it is possible that the object at
		 * 'freelist_iter' is already corrupted. So isolate all objects
		 * starting at 'freelist_iter' by skipping them.
		 */
		if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
			break;

		freelist_tail = freelist_iter;
		free_delta++;

		freelist_iter = nextfree;
	}

	/*
	 * Stage two: Unfreeze the slab while splicing the per-cpu
	 * freelist to the head of slab's freelist.
	 */
	do {
		old.freelist = READ_ONCE(slab->freelist);
		old.counters = READ_ONCE(slab->counters);
		VM_BUG_ON(!old.frozen);

		/* Determine target state of the slab */
		new.counters = old.counters;
		new.frozen = 0;
		if (freelist_tail) {
			new.inuse -= free_delta;
			set_freepointer(s, freelist_tail, old.freelist);
			new.freelist = freelist;
		} else {
			new.freelist = old.freelist;
		}
	} while (!slab_update_freelist(s, slab,
				       old.freelist, old.counters,
				       new.freelist, new.counters,
				       "unfreezing slab"));

	/*
	 * Stage three: Manipulate the slab list based on the updated state.
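	 * In short: an empty slab is discarded once the node already holds
	 * enough partial slabs, a slab with remaining free objects goes back
	 * on the partial list (at the tail if remote frees were observed),
	 * and a fully used slab is left off the lists entirely.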
3009 */ 3010 if (!new.inuse && n->nr_partial >= s->min_partial) { 3011 stat(s, DEACTIVATE_EMPTY); 3012 discard_slab(s, slab); 3013 stat(s, FREE_SLAB); 3014 } else if (new.freelist) { 3015 spin_lock_irqsave(&n->list_lock, flags); 3016 add_partial(n, slab, tail); 3017 spin_unlock_irqrestore(&n->list_lock, flags); 3018 stat(s, tail); 3019 } else { 3020 stat(s, DEACTIVATE_FULL); 3021 } 3022 } 3023 3024 #ifdef CONFIG_SLUB_CPU_PARTIAL 3025 static void __put_partials(struct kmem_cache *s, struct slab *partial_slab) 3026 { 3027 struct kmem_cache_node *n = NULL, *n2 = NULL; 3028 struct slab *slab, *slab_to_discard = NULL; 3029 unsigned long flags = 0; 3030 3031 while (partial_slab) { 3032 slab = partial_slab; 3033 partial_slab = slab->next; 3034 3035 n2 = get_node(s, slab_nid(slab)); 3036 if (n != n2) { 3037 if (n) 3038 spin_unlock_irqrestore(&n->list_lock, flags); 3039 3040 n = n2; 3041 spin_lock_irqsave(&n->list_lock, flags); 3042 } 3043 3044 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) { 3045 slab->next = slab_to_discard; 3046 slab_to_discard = slab; 3047 } else { 3048 add_partial(n, slab, DEACTIVATE_TO_TAIL); 3049 stat(s, FREE_ADD_PARTIAL); 3050 } 3051 } 3052 3053 if (n) 3054 spin_unlock_irqrestore(&n->list_lock, flags); 3055 3056 while (slab_to_discard) { 3057 slab = slab_to_discard; 3058 slab_to_discard = slab_to_discard->next; 3059 3060 stat(s, DEACTIVATE_EMPTY); 3061 discard_slab(s, slab); 3062 stat(s, FREE_SLAB); 3063 } 3064 } 3065 3066 /* 3067 * Put all the cpu partial slabs to the node partial list. 3068 */ 3069 static void put_partials(struct kmem_cache *s) 3070 { 3071 struct slab *partial_slab; 3072 unsigned long flags; 3073 3074 local_lock_irqsave(&s->cpu_slab->lock, flags); 3075 partial_slab = this_cpu_read(s->cpu_slab->partial); 3076 this_cpu_write(s->cpu_slab->partial, NULL); 3077 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3078 3079 if (partial_slab) 3080 __put_partials(s, partial_slab); 3081 } 3082 3083 static void put_partials_cpu(struct kmem_cache *s, 3084 struct kmem_cache_cpu *c) 3085 { 3086 struct slab *partial_slab; 3087 3088 partial_slab = slub_percpu_partial(c); 3089 c->partial = NULL; 3090 3091 if (partial_slab) 3092 __put_partials(s, partial_slab); 3093 } 3094 3095 /* 3096 * Put a slab into a partial slab slot if available. 3097 * 3098 * If we did not find a slot then simply move all the partials to the 3099 * per node partial list. 3100 */ 3101 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) 3102 { 3103 struct slab *oldslab; 3104 struct slab *slab_to_put = NULL; 3105 unsigned long flags; 3106 int slabs = 0; 3107 3108 local_lock_irqsave(&s->cpu_slab->lock, flags); 3109 3110 oldslab = this_cpu_read(s->cpu_slab->partial); 3111 3112 if (oldslab) { 3113 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { 3114 /* 3115 * Partial array is full. Move the existing set to the 3116 * per node partial list. Postpone the actual unfreezing 3117 * outside of the critical section. 
3118 */ 3119 slab_to_put = oldslab; 3120 oldslab = NULL; 3121 } else { 3122 slabs = oldslab->slabs; 3123 } 3124 } 3125 3126 slabs++; 3127 3128 slab->slabs = slabs; 3129 slab->next = oldslab; 3130 3131 this_cpu_write(s->cpu_slab->partial, slab); 3132 3133 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3134 3135 if (slab_to_put) { 3136 __put_partials(s, slab_to_put); 3137 stat(s, CPU_PARTIAL_DRAIN); 3138 } 3139 } 3140 3141 #else /* CONFIG_SLUB_CPU_PARTIAL */ 3142 3143 static inline void put_partials(struct kmem_cache *s) { } 3144 static inline void put_partials_cpu(struct kmem_cache *s, 3145 struct kmem_cache_cpu *c) { } 3146 3147 #endif /* CONFIG_SLUB_CPU_PARTIAL */ 3148 3149 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) 3150 { 3151 unsigned long flags; 3152 struct slab *slab; 3153 void *freelist; 3154 3155 local_lock_irqsave(&s->cpu_slab->lock, flags); 3156 3157 slab = c->slab; 3158 freelist = c->freelist; 3159 3160 c->slab = NULL; 3161 c->freelist = NULL; 3162 c->tid = next_tid(c->tid); 3163 3164 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3165 3166 if (slab) { 3167 deactivate_slab(s, slab, freelist); 3168 stat(s, CPUSLAB_FLUSH); 3169 } 3170 } 3171 3172 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) 3173 { 3174 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3175 void *freelist = c->freelist; 3176 struct slab *slab = c->slab; 3177 3178 c->slab = NULL; 3179 c->freelist = NULL; 3180 c->tid = next_tid(c->tid); 3181 3182 if (slab) { 3183 deactivate_slab(s, slab, freelist); 3184 stat(s, CPUSLAB_FLUSH); 3185 } 3186 3187 put_partials_cpu(s, c); 3188 } 3189 3190 struct slub_flush_work { 3191 struct work_struct work; 3192 struct kmem_cache *s; 3193 bool skip; 3194 }; 3195 3196 /* 3197 * Flush cpu slab. 3198 * 3199 * Called from CPU work handler with migration disabled. 3200 */ 3201 static void flush_cpu_slab(struct work_struct *w) 3202 { 3203 struct kmem_cache *s; 3204 struct kmem_cache_cpu *c; 3205 struct slub_flush_work *sfw; 3206 3207 sfw = container_of(w, struct slub_flush_work, work); 3208 3209 s = sfw->s; 3210 c = this_cpu_ptr(s->cpu_slab); 3211 3212 if (c->slab) 3213 flush_slab(s, c); 3214 3215 put_partials(s); 3216 } 3217 3218 static bool has_cpu_slab(int cpu, struct kmem_cache *s) 3219 { 3220 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); 3221 3222 return c->slab || slub_percpu_partial(c); 3223 } 3224 3225 static DEFINE_MUTEX(flush_lock); 3226 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush); 3227 3228 static void flush_all_cpus_locked(struct kmem_cache *s) 3229 { 3230 struct slub_flush_work *sfw; 3231 unsigned int cpu; 3232 3233 lockdep_assert_cpus_held(); 3234 mutex_lock(&flush_lock); 3235 3236 for_each_online_cpu(cpu) { 3237 sfw = &per_cpu(slub_flush, cpu); 3238 if (!has_cpu_slab(cpu, s)) { 3239 sfw->skip = true; 3240 continue; 3241 } 3242 INIT_WORK(&sfw->work, flush_cpu_slab); 3243 sfw->skip = false; 3244 sfw->s = s; 3245 queue_work_on(cpu, flushwq, &sfw->work); 3246 } 3247 3248 for_each_online_cpu(cpu) { 3249 sfw = &per_cpu(slub_flush, cpu); 3250 if (sfw->skip) 3251 continue; 3252 flush_work(&sfw->work); 3253 } 3254 3255 mutex_unlock(&flush_lock); 3256 } 3257 3258 static void flush_all(struct kmem_cache *s) 3259 { 3260 cpus_read_lock(); 3261 flush_all_cpus_locked(s); 3262 cpus_read_unlock(); 3263 } 3264 3265 /* 3266 * Use the cpu notifier to insure that the cpu slabs are flushed when 3267 * necessary. 
3268 */ 3269 static int slub_cpu_dead(unsigned int cpu) 3270 { 3271 struct kmem_cache *s; 3272 3273 mutex_lock(&slab_mutex); 3274 list_for_each_entry(s, &slab_caches, list) 3275 __flush_cpu_slab(s, cpu); 3276 mutex_unlock(&slab_mutex); 3277 return 0; 3278 } 3279 3280 #else /* CONFIG_SLUB_TINY */ 3281 static inline void flush_all_cpus_locked(struct kmem_cache *s) { } 3282 static inline void flush_all(struct kmem_cache *s) { } 3283 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } 3284 static inline int slub_cpu_dead(unsigned int cpu) { return 0; } 3285 #endif /* CONFIG_SLUB_TINY */ 3286 3287 /* 3288 * Check if the objects in a per cpu structure fit numa 3289 * locality expectations. 3290 */ 3291 static inline int node_match(struct slab *slab, int node) 3292 { 3293 #ifdef CONFIG_NUMA 3294 if (node != NUMA_NO_NODE && slab_nid(slab) != node) 3295 return 0; 3296 #endif 3297 return 1; 3298 } 3299 3300 #ifdef CONFIG_SLUB_DEBUG 3301 static int count_free(struct slab *slab) 3302 { 3303 return slab->objects - slab->inuse; 3304 } 3305 3306 static inline unsigned long node_nr_objs(struct kmem_cache_node *n) 3307 { 3308 return atomic_long_read(&n->total_objects); 3309 } 3310 3311 /* Supports checking bulk free of a constructed freelist */ 3312 static inline bool free_debug_processing(struct kmem_cache *s, 3313 struct slab *slab, void *head, void *tail, int *bulk_cnt, 3314 unsigned long addr, depot_stack_handle_t handle) 3315 { 3316 bool checks_ok = false; 3317 void *object = head; 3318 int cnt = 0; 3319 3320 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3321 if (!check_slab(s, slab)) 3322 goto out; 3323 } 3324 3325 if (slab->inuse < *bulk_cnt) { 3326 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", 3327 slab->inuse, *bulk_cnt); 3328 goto out; 3329 } 3330 3331 next_object: 3332 3333 if (++cnt > *bulk_cnt) 3334 goto out_cnt; 3335 3336 if (s->flags & SLAB_CONSISTENCY_CHECKS) { 3337 if (!free_consistency_checks(s, slab, object, addr)) 3338 goto out; 3339 } 3340 3341 if (s->flags & SLAB_STORE_USER) 3342 set_track_update(s, object, TRACK_FREE, addr, handle); 3343 trace(s, slab, object, 0); 3344 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ 3345 init_object(s, object, SLUB_RED_INACTIVE); 3346 3347 /* Reached end of constructed freelist yet? 
*/ 3348 if (object != tail) { 3349 object = get_freepointer(s, object); 3350 goto next_object; 3351 } 3352 checks_ok = true; 3353 3354 out_cnt: 3355 if (cnt != *bulk_cnt) { 3356 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", 3357 *bulk_cnt, cnt); 3358 *bulk_cnt = cnt; 3359 } 3360 3361 out: 3362 3363 if (!checks_ok) 3364 slab_fix(s, "Object at 0x%p not freed", object); 3365 3366 return checks_ok; 3367 } 3368 #endif /* CONFIG_SLUB_DEBUG */ 3369 3370 #if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS) 3371 static unsigned long count_partial(struct kmem_cache_node *n, 3372 int (*get_count)(struct slab *)) 3373 { 3374 unsigned long flags; 3375 unsigned long x = 0; 3376 struct slab *slab; 3377 3378 spin_lock_irqsave(&n->list_lock, flags); 3379 list_for_each_entry(slab, &n->partial, slab_list) 3380 x += get_count(slab); 3381 spin_unlock_irqrestore(&n->list_lock, flags); 3382 return x; 3383 } 3384 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */ 3385 3386 #ifdef CONFIG_SLUB_DEBUG 3387 #define MAX_PARTIAL_TO_SCAN 10000 3388 3389 static unsigned long count_partial_free_approx(struct kmem_cache_node *n) 3390 { 3391 unsigned long flags; 3392 unsigned long x = 0; 3393 struct slab *slab; 3394 3395 spin_lock_irqsave(&n->list_lock, flags); 3396 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) { 3397 list_for_each_entry(slab, &n->partial, slab_list) 3398 x += slab->objects - slab->inuse; 3399 } else { 3400 /* 3401 * For a long list, approximate the total count of objects in 3402 * it to meet the limit on the number of slabs to scan. 3403 * Scan from both the list's head and tail for better accuracy. 3404 */ 3405 unsigned long scanned = 0; 3406 3407 list_for_each_entry(slab, &n->partial, slab_list) { 3408 x += slab->objects - slab->inuse; 3409 if (++scanned == MAX_PARTIAL_TO_SCAN / 2) 3410 break; 3411 } 3412 list_for_each_entry_reverse(slab, &n->partial, slab_list) { 3413 x += slab->objects - slab->inuse; 3414 if (++scanned == MAX_PARTIAL_TO_SCAN) 3415 break; 3416 } 3417 x = mult_frac(x, n->nr_partial, scanned); 3418 x = min(x, node_nr_objs(n)); 3419 } 3420 spin_unlock_irqrestore(&n->list_lock, flags); 3421 return x; 3422 } 3423 3424 static noinline void 3425 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) 3426 { 3427 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL, 3428 DEFAULT_RATELIMIT_BURST); 3429 int node; 3430 struct kmem_cache_node *n; 3431 3432 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs)) 3433 return; 3434 3435 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", 3436 nid, gfpflags, &gfpflags); 3437 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n", 3438 s->name, s->object_size, s->size, oo_order(s->oo), 3439 oo_order(s->min)); 3440 3441 if (oo_order(s->min) > get_order(s->object_size)) 3442 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n", 3443 s->name); 3444 3445 for_each_kmem_cache_node(s, node, n) { 3446 unsigned long nr_slabs; 3447 unsigned long nr_objs; 3448 unsigned long nr_free; 3449 3450 nr_free = count_partial_free_approx(n); 3451 nr_slabs = node_nr_slabs(n); 3452 nr_objs = node_nr_objs(n); 3453 3454 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n", 3455 node, nr_slabs, nr_objs, nr_free); 3456 } 3457 } 3458 #else /* CONFIG_SLUB_DEBUG */ 3459 static inline void 3460 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } 3461 #endif 3462 3463 static inline bool pfmemalloc_match(struct slab *slab, gfp_t 
gfpflags) 3464 { 3465 if (unlikely(slab_test_pfmemalloc(slab))) 3466 return gfp_pfmemalloc_allowed(gfpflags); 3467 3468 return true; 3469 } 3470 3471 #ifndef CONFIG_SLUB_TINY 3472 static inline bool 3473 __update_cpu_freelist_fast(struct kmem_cache *s, 3474 void *freelist_old, void *freelist_new, 3475 unsigned long tid) 3476 { 3477 freelist_aba_t old = { .freelist = freelist_old, .counter = tid }; 3478 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) }; 3479 3480 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, 3481 &old.full, new.full); 3482 } 3483 3484 /* 3485 * Check the slab->freelist and either transfer the freelist to the 3486 * per cpu freelist or deactivate the slab. 3487 * 3488 * The slab is still frozen if the return value is not NULL. 3489 * 3490 * If this function returns NULL then the slab has been unfrozen. 3491 */ 3492 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) 3493 { 3494 struct slab new; 3495 unsigned long counters; 3496 void *freelist; 3497 3498 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3499 3500 do { 3501 freelist = slab->freelist; 3502 counters = slab->counters; 3503 3504 new.counters = counters; 3505 3506 new.inuse = slab->objects; 3507 new.frozen = freelist != NULL; 3508 3509 } while (!__slab_update_freelist(s, slab, 3510 freelist, counters, 3511 NULL, new.counters, 3512 "get_freelist")); 3513 3514 return freelist; 3515 } 3516 3517 /* 3518 * Freeze the partial slab and return the pointer to the freelist. 3519 */ 3520 static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab) 3521 { 3522 struct slab new; 3523 unsigned long counters; 3524 void *freelist; 3525 3526 do { 3527 freelist = slab->freelist; 3528 counters = slab->counters; 3529 3530 new.counters = counters; 3531 VM_BUG_ON(new.frozen); 3532 3533 new.inuse = slab->objects; 3534 new.frozen = 1; 3535 3536 } while (!slab_update_freelist(s, slab, 3537 freelist, counters, 3538 NULL, new.counters, 3539 "freeze_slab")); 3540 3541 return freelist; 3542 } 3543 3544 /* 3545 * Slow path. The lockless freelist is empty or we need to perform 3546 * debugging duties. 3547 * 3548 * Processing is still very fast if new objects have been freed to the 3549 * regular freelist. In that case we simply take over the regular freelist 3550 * as the lockless freelist and zap the regular freelist. 3551 * 3552 * If that is not working then we fall back to the partial lists. We take the 3553 * first element of the freelist as the object to allocate now and move the 3554 * rest of the freelist to the lockless freelist. 3555 * 3556 * And if we were unable to get a new slab from the partial slab lists then 3557 * we need to allocate a new slab. This is the slowest path since it involves 3558 * a call to the page allocator and the setup of a new slab. 3559 * 3560 * Version of __slab_alloc to use when we know that preemption is 3561 * already disabled (which is the case for bulk allocation). 
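 *
 * In short, the order of attempts, from cheapest to most expensive, is:
 * the cpu freelist, the cpu slab's own freelist, the percpu partial list,
 * the node partial lists, and finally a new slab from the page allocator.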
3562 */ 3563 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3564 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3565 { 3566 void *freelist; 3567 struct slab *slab; 3568 unsigned long flags; 3569 struct partial_context pc; 3570 bool try_thisnode = true; 3571 3572 stat(s, ALLOC_SLOWPATH); 3573 3574 reread_slab: 3575 3576 slab = READ_ONCE(c->slab); 3577 if (!slab) { 3578 /* 3579 * if the node is not online or has no normal memory, just 3580 * ignore the node constraint 3581 */ 3582 if (unlikely(node != NUMA_NO_NODE && 3583 !node_isset(node, slab_nodes))) 3584 node = NUMA_NO_NODE; 3585 goto new_slab; 3586 } 3587 3588 if (unlikely(!node_match(slab, node))) { 3589 /* 3590 * same as above but node_match() being false already 3591 * implies node != NUMA_NO_NODE 3592 */ 3593 if (!node_isset(node, slab_nodes)) { 3594 node = NUMA_NO_NODE; 3595 } else { 3596 stat(s, ALLOC_NODE_MISMATCH); 3597 goto deactivate_slab; 3598 } 3599 } 3600 3601 /* 3602 * By rights, we should be searching for a slab page that was 3603 * PFMEMALLOC but right now, we are losing the pfmemalloc 3604 * information when the page leaves the per-cpu allocator 3605 */ 3606 if (unlikely(!pfmemalloc_match(slab, gfpflags))) 3607 goto deactivate_slab; 3608 3609 /* must check again c->slab in case we got preempted and it changed */ 3610 local_lock_irqsave(&s->cpu_slab->lock, flags); 3611 if (unlikely(slab != c->slab)) { 3612 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3613 goto reread_slab; 3614 } 3615 freelist = c->freelist; 3616 if (freelist) 3617 goto load_freelist; 3618 3619 freelist = get_freelist(s, slab); 3620 3621 if (!freelist) { 3622 c->slab = NULL; 3623 c->tid = next_tid(c->tid); 3624 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3625 stat(s, DEACTIVATE_BYPASS); 3626 goto new_slab; 3627 } 3628 3629 stat(s, ALLOC_REFILL); 3630 3631 load_freelist: 3632 3633 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); 3634 3635 /* 3636 * freelist is pointing to the list of objects to be used. 3637 * slab is pointing to the slab from which the objects are obtained. 3638 * That slab must be frozen for per cpu allocations to work. 
3639 */ 3640 VM_BUG_ON(!c->slab->frozen); 3641 c->freelist = get_freepointer(s, freelist); 3642 c->tid = next_tid(c->tid); 3643 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3644 return freelist; 3645 3646 deactivate_slab: 3647 3648 local_lock_irqsave(&s->cpu_slab->lock, flags); 3649 if (slab != c->slab) { 3650 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3651 goto reread_slab; 3652 } 3653 freelist = c->freelist; 3654 c->slab = NULL; 3655 c->freelist = NULL; 3656 c->tid = next_tid(c->tid); 3657 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3658 deactivate_slab(s, slab, freelist); 3659 3660 new_slab: 3661 3662 #ifdef CONFIG_SLUB_CPU_PARTIAL 3663 while (slub_percpu_partial(c)) { 3664 local_lock_irqsave(&s->cpu_slab->lock, flags); 3665 if (unlikely(c->slab)) { 3666 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3667 goto reread_slab; 3668 } 3669 if (unlikely(!slub_percpu_partial(c))) { 3670 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3671 /* we were preempted and partial list got empty */ 3672 goto new_objects; 3673 } 3674 3675 slab = slub_percpu_partial(c); 3676 slub_set_percpu_partial(c, slab); 3677 3678 if (likely(node_match(slab, node) && 3679 pfmemalloc_match(slab, gfpflags))) { 3680 c->slab = slab; 3681 freelist = get_freelist(s, slab); 3682 VM_BUG_ON(!freelist); 3683 stat(s, CPU_PARTIAL_ALLOC); 3684 goto load_freelist; 3685 } 3686 3687 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3688 3689 slab->next = NULL; 3690 __put_partials(s, slab); 3691 } 3692 #endif 3693 3694 new_objects: 3695 3696 pc.flags = gfpflags; 3697 /* 3698 * When a preferred node is indicated but no __GFP_THISNODE 3699 * 3700 * 1) try to get a partial slab from target node only by having 3701 * __GFP_THISNODE in pc.flags for get_partial() 3702 * 2) if 1) failed, try to allocate a new slab from target node with 3703 * GPF_NOWAIT | __GFP_THISNODE opportunistically 3704 * 3) if 2) failed, retry with original gfpflags which will allow 3705 * get_partial() try partial lists of other nodes before potentially 3706 * allocating new page from other nodes 3707 */ 3708 if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 3709 && try_thisnode)) 3710 pc.flags = GFP_NOWAIT | __GFP_THISNODE; 3711 3712 pc.orig_size = orig_size; 3713 slab = get_partial(s, node, &pc); 3714 if (slab) { 3715 if (kmem_cache_debug(s)) { 3716 freelist = pc.object; 3717 /* 3718 * For debug caches here we had to go through 3719 * alloc_single_from_partial() so just store the 3720 * tracking info and return the object. 
3721 */ 3722 if (s->flags & SLAB_STORE_USER) 3723 set_track(s, freelist, TRACK_ALLOC, addr); 3724 3725 return freelist; 3726 } 3727 3728 freelist = freeze_slab(s, slab); 3729 goto retry_load_slab; 3730 } 3731 3732 slub_put_cpu_ptr(s->cpu_slab); 3733 slab = new_slab(s, pc.flags, node); 3734 c = slub_get_cpu_ptr(s->cpu_slab); 3735 3736 if (unlikely(!slab)) { 3737 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE) 3738 && try_thisnode) { 3739 try_thisnode = false; 3740 goto new_objects; 3741 } 3742 slab_out_of_memory(s, gfpflags, node); 3743 return NULL; 3744 } 3745 3746 stat(s, ALLOC_SLAB); 3747 3748 if (kmem_cache_debug(s)) { 3749 freelist = alloc_single_from_new_slab(s, slab, orig_size); 3750 3751 if (unlikely(!freelist)) 3752 goto new_objects; 3753 3754 if (s->flags & SLAB_STORE_USER) 3755 set_track(s, freelist, TRACK_ALLOC, addr); 3756 3757 return freelist; 3758 } 3759 3760 /* 3761 * No other reference to the slab yet so we can 3762 * muck around with it freely without cmpxchg 3763 */ 3764 freelist = slab->freelist; 3765 slab->freelist = NULL; 3766 slab->inuse = slab->objects; 3767 slab->frozen = 1; 3768 3769 inc_slabs_node(s, slab_nid(slab), slab->objects); 3770 3771 if (unlikely(!pfmemalloc_match(slab, gfpflags))) { 3772 /* 3773 * For !pfmemalloc_match() case we don't load freelist so that 3774 * we don't make further mismatched allocations easier. 3775 */ 3776 deactivate_slab(s, slab, get_freepointer(s, freelist)); 3777 return freelist; 3778 } 3779 3780 retry_load_slab: 3781 3782 local_lock_irqsave(&s->cpu_slab->lock, flags); 3783 if (unlikely(c->slab)) { 3784 void *flush_freelist = c->freelist; 3785 struct slab *flush_slab = c->slab; 3786 3787 c->slab = NULL; 3788 c->freelist = NULL; 3789 c->tid = next_tid(c->tid); 3790 3791 local_unlock_irqrestore(&s->cpu_slab->lock, flags); 3792 3793 deactivate_slab(s, flush_slab, flush_freelist); 3794 3795 stat(s, CPUSLAB_FLUSH); 3796 3797 goto retry_load_slab; 3798 } 3799 c->slab = slab; 3800 3801 goto load_freelist; 3802 } 3803 3804 /* 3805 * A wrapper for ___slab_alloc() for contexts where preemption is not yet 3806 * disabled. Compensates for possible cpu changes by refetching the per cpu area 3807 * pointer. 3808 */ 3809 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 3810 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size) 3811 { 3812 void *p; 3813 3814 #ifdef CONFIG_PREEMPT_COUNT 3815 /* 3816 * We may have been preempted and rescheduled on a different 3817 * cpu before disabling preemption. Need to reload cpu area 3818 * pointer. 3819 */ 3820 c = slub_get_cpu_ptr(s->cpu_slab); 3821 #endif 3822 3823 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); 3824 #ifdef CONFIG_PREEMPT_COUNT 3825 slub_put_cpu_ptr(s->cpu_slab); 3826 #endif 3827 return p; 3828 } 3829 3830 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, 3831 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 3832 { 3833 struct kmem_cache_cpu *c; 3834 struct slab *slab; 3835 unsigned long tid; 3836 void *object; 3837 3838 redo: 3839 /* 3840 * Must read kmem_cache cpu data via this cpu ptr. Preemption is 3841 * enabled. We may switch back and forth between cpus while 3842 * reading from one cpu area. That does not matter as long 3843 * as we end up on the original cpu again when doing the cmpxchg. 3844 * 3845 * We must guarantee that tid and kmem_cache_cpu are retrieved on the 3846 * same cpu. We read first the kmem_cache_cpu pointer and use it to read 3847 * the tid. 
	 * If we are preempted and switched to another cpu between the
	 * two reads, it's OK as the two are still associated with the same cpu
	 * and the cmpxchg later will validate the cpu.
	 */
	c = raw_cpu_ptr(s->cpu_slab);
	tid = READ_ONCE(c->tid);

	/*
	 * The irqless object alloc/free algorithm used here depends on the
	 * sequence of fetching cpu_slab's data. tid should be fetched before
	 * anything on c to guarantee that an object and slab associated with
	 * the previous tid won't be used with the current tid. If we fetched
	 * tid first, the object and slab could be ones associated with the
	 * next tid and our alloc/free request would fail. In that case we
	 * simply retry, so this is not a problem.
	 */
	barrier();

	/*
	 * The transaction ids are globally unique per cpu and per operation on
	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
	 * occurs on the right processor and that there was no operation on the
	 * linked list in between.
	 */

	object = c->freelist;
	slab = c->slab;

	if (!USE_LOCKLESS_FAST_PATH() ||
	    unlikely(!object || !slab || !node_match(slab, node))) {
		object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
	} else {
		void *next_object = get_freepointer_safe(s, object);

		/*
		 * The cmpxchg will only match if there was no additional
		 * operation and if we are on the right processor.
		 *
		 * The cmpxchg does the following atomically (without lock
		 * semantics!)
		 * 1. Relocate first pointer to the current per cpu area.
		 * 2. Verify that tid and freelist have not been changed
		 * 3. If they were not changed replace tid and freelist
		 *
		 * Since this is without lock semantics the protection is only
		 * against code executing on this cpu *not* from access by
		 * other cpus.
		 */
		if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
			note_cmpxchg_failure("slab_alloc", s, tid);
			goto redo;
		}
		prefetch_freepointer(s, next_object);
		stat(s, ALLOC_FASTPATH);
	}

	return object;
}
#else /* CONFIG_SLUB_TINY */
static void *__slab_alloc_node(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
{
	struct partial_context pc;
	struct slab *slab;
	void *object;

	pc.flags = gfpflags;
	pc.orig_size = orig_size;
	slab = get_partial(s, node, &pc);

	if (slab)
		return pc.object;

	slab = new_slab(s, gfpflags, node);
	if (unlikely(!slab)) {
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}

	object = alloc_single_from_new_slab(s, slab, orig_size);

	return object;
}
#endif /* CONFIG_SLUB_TINY */

/*
 * If the object has been wiped upon free, make sure it's fully initialized by
 * zeroing out the freelist pointer.
3934 */ 3935 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, 3936 void *obj) 3937 { 3938 if (unlikely(slab_want_init_on_free(s)) && obj && 3939 !freeptr_outside_object(s)) 3940 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 3941 0, sizeof(void *)); 3942 } 3943 3944 static __fastpath_inline 3945 struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) 3946 { 3947 flags &= gfp_allowed_mask; 3948 3949 might_alloc(flags); 3950 3951 if (unlikely(should_failslab(s, flags))) 3952 return NULL; 3953 3954 return s; 3955 } 3956 3957 static __fastpath_inline 3958 bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 3959 gfp_t flags, size_t size, void **p, bool init, 3960 unsigned int orig_size) 3961 { 3962 unsigned int zero_size = s->object_size; 3963 bool kasan_init = init; 3964 size_t i; 3965 gfp_t init_flags = flags & gfp_allowed_mask; 3966 3967 /* 3968 * For kmalloc object, the allocated memory size(object_size) is likely 3969 * larger than the requested size(orig_size). If redzone check is 3970 * enabled for the extra space, don't zero it, as it will be redzoned 3971 * soon. The redzone operation for this extra space could be seen as a 3972 * replacement of current poisoning under certain debug option, and 3973 * won't break other sanity checks. 3974 */ 3975 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) && 3976 (s->flags & SLAB_KMALLOC)) 3977 zero_size = orig_size; 3978 3979 /* 3980 * When slab_debug is enabled, avoid memory initialization integrated 3981 * into KASAN and instead zero out the memory via the memset below with 3982 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and 3983 * cause false-positive reports. This does not lead to a performance 3984 * penalty on production builds, as slab_debug is not intended to be 3985 * enabled there. 3986 */ 3987 if (__slub_debug_enabled()) 3988 kasan_init = false; 3989 3990 /* 3991 * As memory initialization might be integrated into KASAN, 3992 * kasan_slab_alloc and initialization memset must be 3993 * kept together to avoid discrepancies in behavior. 3994 * 3995 * As p[i] might get tagged, memset and kmemleak hook come after KASAN. 3996 */ 3997 for (i = 0; i < size; i++) { 3998 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init); 3999 if (p[i] && init && (!kasan_init || 4000 !kasan_has_integrated_init())) 4001 memset(p[i], 0, zero_size); 4002 kmemleak_alloc_recursive(p[i], s->object_size, 1, 4003 s->flags, init_flags); 4004 kmsan_slab_alloc(s, p[i], init_flags); 4005 alloc_tagging_slab_alloc_hook(s, p[i], flags); 4006 } 4007 4008 return memcg_slab_post_alloc_hook(s, lru, flags, size, p); 4009 } 4010 4011 /* 4012 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 4013 * have the fastpath folded into their functions. So no function call 4014 * overhead for requests that can be satisfied on the fastpath. 4015 * 4016 * The fastpath works by first checking if the lockless freelist can be used. 4017 * If not then __slab_alloc is called for slow processing. 4018 * 4019 * Otherwise we can simply pick the next object from the lockless free list. 
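 *
 * A condensed sketch of that fastpath, mirroring the code in
 * __slab_alloc_node() (for orientation only, not a drop-in
 * implementation):
 *
 *	c = raw_cpu_ptr(s->cpu_slab);
 *	tid = READ_ONCE(c->tid);	/* ordered before c->freelist */
 *	object = c->freelist;
 *	next = get_freepointer_safe(s, object);
 *	if (!__update_cpu_freelist_fast(s, object, next, tid))
 *		goto redo;		/* raced or migrated: retry */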
4020 */ 4021 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, 4022 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) 4023 { 4024 void *object; 4025 bool init = false; 4026 4027 s = slab_pre_alloc_hook(s, gfpflags); 4028 if (unlikely(!s)) 4029 return NULL; 4030 4031 object = kfence_alloc(s, orig_size, gfpflags); 4032 if (unlikely(object)) 4033 goto out; 4034 4035 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); 4036 4037 maybe_wipe_obj_freeptr(s, object); 4038 init = slab_want_init_on_alloc(gfpflags, s); 4039 4040 out: 4041 /* 4042 * When init equals 'true', like for kzalloc() family, only 4043 * @orig_size bytes might be zeroed instead of s->object_size 4044 * In case this fails due to memcg_slab_post_alloc_hook(), 4045 * object is set to NULL 4046 */ 4047 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size); 4048 4049 return object; 4050 } 4051 4052 void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags) 4053 { 4054 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, 4055 s->object_size); 4056 4057 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4058 4059 return ret; 4060 } 4061 EXPORT_SYMBOL(kmem_cache_alloc_noprof); 4062 4063 void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, 4064 gfp_t gfpflags) 4065 { 4066 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, 4067 s->object_size); 4068 4069 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); 4070 4071 return ret; 4072 } 4073 EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof); 4074 4075 /** 4076 * kmem_cache_alloc_node - Allocate an object on the specified node 4077 * @s: The cache to allocate from. 4078 * @gfpflags: See kmalloc(). 4079 * @node: node number of the target node. 4080 * 4081 * Identical to kmem_cache_alloc but it will allocate memory on the given 4082 * node, which can improve the performance for cpu bound structures. 4083 * 4084 * Fallback to other node is possible if __GFP_THISNODE is not set. 4085 * 4086 * Return: pointer to the new object or %NULL in case of error 4087 */ 4088 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node) 4089 { 4090 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); 4091 4092 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); 4093 4094 return ret; 4095 } 4096 EXPORT_SYMBOL(kmem_cache_alloc_node_noprof); 4097 4098 /* 4099 * To avoid unnecessary overhead, we pass through large allocation requests 4100 * directly to the page allocator. We use __GFP_COMP, because we will need to 4101 * know the allocation order to free the pages properly in kfree. 4102 */ 4103 static void *___kmalloc_large_node(size_t size, gfp_t flags, int node) 4104 { 4105 struct folio *folio; 4106 void *ptr = NULL; 4107 unsigned int order = get_order(size); 4108 4109 if (unlikely(flags & GFP_SLAB_BUG_MASK)) 4110 flags = kmalloc_fix_flags(flags); 4111 4112 flags |= __GFP_COMP; 4113 folio = (struct folio *)alloc_pages_node_noprof(node, flags, order); 4114 if (folio) { 4115 ptr = folio_address(folio); 4116 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4117 PAGE_SIZE << order); 4118 } 4119 4120 ptr = kasan_kmalloc_large(ptr, size, flags); 4121 /* As ptr might get tagged, call kmemleak hook after KASAN. 
*/ 4122 kmemleak_alloc(ptr, size, 1, flags); 4123 kmsan_kmalloc_large(ptr, size, flags); 4124 4125 return ptr; 4126 } 4127 4128 void *__kmalloc_large_noprof(size_t size, gfp_t flags) 4129 { 4130 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE); 4131 4132 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4133 flags, NUMA_NO_NODE); 4134 return ret; 4135 } 4136 EXPORT_SYMBOL(__kmalloc_large_noprof); 4137 4138 void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) 4139 { 4140 void *ret = ___kmalloc_large_node(size, flags, node); 4141 4142 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), 4143 flags, node); 4144 return ret; 4145 } 4146 EXPORT_SYMBOL(__kmalloc_large_node_noprof); 4147 4148 static __always_inline 4149 void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node, 4150 unsigned long caller) 4151 { 4152 struct kmem_cache *s; 4153 void *ret; 4154 4155 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { 4156 ret = __kmalloc_large_node_noprof(size, flags, node); 4157 trace_kmalloc(caller, ret, size, 4158 PAGE_SIZE << get_order(size), flags, node); 4159 return ret; 4160 } 4161 4162 if (unlikely(!size)) 4163 return ZERO_SIZE_PTR; 4164 4165 s = kmalloc_slab(size, b, flags, caller); 4166 4167 ret = slab_alloc_node(s, NULL, flags, node, caller, size); 4168 ret = kasan_kmalloc(s, ret, size, flags); 4169 trace_kmalloc(caller, ret, size, s->size, flags, node); 4170 return ret; 4171 } 4172 void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) 4173 { 4174 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_); 4175 } 4176 EXPORT_SYMBOL(__kmalloc_node_noprof); 4177 4178 void *__kmalloc_noprof(size_t size, gfp_t flags) 4179 { 4180 return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_); 4181 } 4182 EXPORT_SYMBOL(__kmalloc_noprof); 4183 4184 void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, 4185 int node, unsigned long caller) 4186 { 4187 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller); 4188 4189 } 4190 EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof); 4191 4192 void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size) 4193 { 4194 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, 4195 _RET_IP_, size); 4196 4197 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); 4198 4199 ret = kasan_kmalloc(s, ret, size, gfpflags); 4200 return ret; 4201 } 4202 EXPORT_SYMBOL(__kmalloc_cache_noprof); 4203 4204 void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags, 4205 int node, size_t size) 4206 { 4207 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); 4208 4209 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); 4210 4211 ret = kasan_kmalloc(s, ret, size, gfpflags); 4212 return ret; 4213 } 4214 EXPORT_SYMBOL(__kmalloc_cache_node_noprof); 4215 4216 static noinline void free_to_partial_list( 4217 struct kmem_cache *s, struct slab *slab, 4218 void *head, void *tail, int bulk_cnt, 4219 unsigned long addr) 4220 { 4221 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); 4222 struct slab *slab_free = NULL; 4223 int cnt = bulk_cnt; 4224 unsigned long flags; 4225 depot_stack_handle_t handle = 0; 4226 4227 if (s->flags & SLAB_STORE_USER) 4228 handle = set_track_prepare(); 4229 4230 spin_lock_irqsave(&n->list_lock, flags); 4231 4232 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { 4233 void *prior = slab->freelist; 4234 
4235 /* Perform the actual freeing while we still hold the locks */ 4236 slab->inuse -= cnt; 4237 set_freepointer(s, tail, prior); 4238 slab->freelist = head; 4239 4240 /* 4241 * If the slab is empty and the node's partial list is full, 4242 * it should be discarded regardless of whether it is on the 4243 * full or the partial list. 4244 */ 4245 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) 4246 slab_free = slab; 4247 4248 if (!prior) { 4249 /* was on full list */ 4250 remove_full(s, n, slab); 4251 if (!slab_free) { 4252 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4253 stat(s, FREE_ADD_PARTIAL); 4254 } 4255 } else if (slab_free) { 4256 remove_partial(n, slab); 4257 stat(s, FREE_REMOVE_PARTIAL); 4258 } 4259 } 4260 4261 if (slab_free) { 4262 /* 4263 * Update the counters while still holding n->list_lock to 4264 * prevent spurious validation warnings 4265 */ 4266 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); 4267 } 4268 4269 spin_unlock_irqrestore(&n->list_lock, flags); 4270 4271 if (slab_free) { 4272 stat(s, FREE_SLAB); 4273 free_slab(s, slab_free); 4274 } 4275 } 4276 4277 /* 4278 * Slow path handling. This may still be called frequently since objects 4279 * have a longer lifetime than the cpu slabs in most processing loads. 4280 * 4281 * So we still attempt to reduce cache line usage. Just take the slab 4282 * lock and free the item. If there is no additional partial slab 4283 * handling required then we can return immediately. 4284 */ 4285 static void __slab_free(struct kmem_cache *s, struct slab *slab, 4286 void *head, void *tail, int cnt, 4287 unsigned long addr) 4288 4289 { 4290 void *prior; 4291 int was_frozen; 4292 struct slab new; 4293 unsigned long counters; 4294 struct kmem_cache_node *n = NULL; 4295 unsigned long flags; 4296 bool on_node_partial; 4297 4298 stat(s, FREE_SLOWPATH); 4299 4300 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { 4301 free_to_partial_list(s, slab, head, tail, cnt, addr); 4302 return; 4303 } 4304 4305 do { 4306 if (unlikely(n)) { 4307 spin_unlock_irqrestore(&n->list_lock, flags); 4308 n = NULL; 4309 } 4310 prior = slab->freelist; 4311 counters = slab->counters; 4312 set_freepointer(s, tail, prior); 4313 new.counters = counters; 4314 was_frozen = new.frozen; 4315 new.inuse -= cnt; 4316 if ((!new.inuse || !prior) && !was_frozen) { 4317 /* Needs to be taken off a list */ 4318 if (!kmem_cache_has_cpu_partial(s) || prior) { 4319 4320 n = get_node(s, slab_nid(slab)); 4321 /* 4322 * Speculatively acquire the list_lock. 4323 * If the cmpxchg does not succeed then we may 4324 * drop the list_lock without any processing. 4325 * 4326 * Otherwise the list_lock will synchronize with 4327 * other processors updating the list of slabs. 4328 */ 4329 spin_lock_irqsave(&n->list_lock, flags); 4330 4331 on_node_partial = slab_test_node_partial(slab); 4332 } 4333 } 4334 4335 } while (!slab_update_freelist(s, slab, 4336 prior, counters, 4337 head, new.counters, 4338 "__slab_free")); 4339 4340 if (likely(!n)) { 4341 4342 if (likely(was_frozen)) { 4343 /* 4344 * The list lock was not taken, therefore no list 4345 * activity is necessary. 4346 */ 4347 stat(s, FREE_FROZEN); 4348 } else if (kmem_cache_has_cpu_partial(s) && !prior) { 4349 /* 4350 * If we started with a full slab then put it onto the 4351 * per cpu partial list.
4352 */ 4353 put_cpu_partial(s, slab, 1); 4354 stat(s, CPU_PARTIAL_FREE); 4355 } 4356 4357 return; 4358 } 4359 4360 /* 4361 * This slab was partially empty but not on the per-node partial list, 4362 * in which case we shouldn't manipulate its list, just return. 4363 */ 4364 if (prior && !on_node_partial) { 4365 spin_unlock_irqrestore(&n->list_lock, flags); 4366 return; 4367 } 4368 4369 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) 4370 goto slab_empty; 4371 4372 /* 4373 * Objects left in the slab. If it was not on the partial list before 4374 * then add it. 4375 */ 4376 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { 4377 add_partial(n, slab, DEACTIVATE_TO_TAIL); 4378 stat(s, FREE_ADD_PARTIAL); 4379 } 4380 spin_unlock_irqrestore(&n->list_lock, flags); 4381 return; 4382 4383 slab_empty: 4384 if (prior) { 4385 /* 4386 * Slab on the partial list. 4387 */ 4388 remove_partial(n, slab); 4389 stat(s, FREE_REMOVE_PARTIAL); 4390 } 4391 4392 spin_unlock_irqrestore(&n->list_lock, flags); 4393 stat(s, FREE_SLAB); 4394 discard_slab(s, slab); 4395 } 4396 4397 #ifndef CONFIG_SLUB_TINY 4398 /* 4399 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that 4400 * can perform fastpath freeing without additional function calls. 4401 * 4402 * The fastpath is only possible if we are freeing to the current cpu slab 4403 * of this processor. This is typically the case if we have just allocated 4404 * the item before. 4405 * 4406 * If the fastpath is not possible then fall back to __slab_free where we deal 4407 * with all sorts of special processing. 4408 * 4409 * Bulk free of a freelist with several objects (all pointing to the 4410 * same slab) is possible by specifying head and tail pointers, plus an 4411 * object count (cnt). Bulk free is indicated by the tail pointer being set. 4412 */ 4413 static __always_inline void do_slab_free(struct kmem_cache *s, 4414 struct slab *slab, void *head, void *tail, 4415 int cnt, unsigned long addr) 4416 { 4417 struct kmem_cache_cpu *c; 4418 unsigned long tid; 4419 void **freelist; 4420 4421 redo: 4422 /* 4423 * Determine the current cpu's per cpu slab. 4424 * The cpu may change afterward. However, that does not matter since 4425 * data is retrieved via this pointer. If we are on the same cpu 4426 * during the cmpxchg then the free will succeed.
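 * As in the alloc fastpath, the tid is read before c->slab below and is
 * paired with the final cmpxchg (or the tid bump under the local lock),
 * so being migrated or preempted in between is detected and we retry.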
4427 */ 4428 c = raw_cpu_ptr(s->cpu_slab); 4429 tid = READ_ONCE(c->tid); 4430 4431 /* Same with comment on barrier() in __slab_alloc_node() */ 4432 barrier(); 4433 4434 if (unlikely(slab != c->slab)) { 4435 __slab_free(s, slab, head, tail, cnt, addr); 4436 return; 4437 } 4438 4439 if (USE_LOCKLESS_FAST_PATH()) { 4440 freelist = READ_ONCE(c->freelist); 4441 4442 set_freepointer(s, tail, freelist); 4443 4444 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { 4445 note_cmpxchg_failure("slab_free", s, tid); 4446 goto redo; 4447 } 4448 } else { 4449 /* Update the free list under the local lock */ 4450 local_lock(&s->cpu_slab->lock); 4451 c = this_cpu_ptr(s->cpu_slab); 4452 if (unlikely(slab != c->slab)) { 4453 local_unlock(&s->cpu_slab->lock); 4454 goto redo; 4455 } 4456 tid = c->tid; 4457 freelist = c->freelist; 4458 4459 set_freepointer(s, tail, freelist); 4460 c->freelist = head; 4461 c->tid = next_tid(tid); 4462 4463 local_unlock(&s->cpu_slab->lock); 4464 } 4465 stat_add(s, FREE_FASTPATH, cnt); 4466 } 4467 #else /* CONFIG_SLUB_TINY */ 4468 static void do_slab_free(struct kmem_cache *s, 4469 struct slab *slab, void *head, void *tail, 4470 int cnt, unsigned long addr) 4471 { 4472 __slab_free(s, slab, head, tail, cnt, addr); 4473 } 4474 #endif /* CONFIG_SLUB_TINY */ 4475 4476 static __fastpath_inline 4477 void slab_free(struct kmem_cache *s, struct slab *slab, void *object, 4478 unsigned long addr) 4479 { 4480 memcg_slab_free_hook(s, slab, &object, 1); 4481 alloc_tagging_slab_free_hook(s, slab, &object, 1); 4482 4483 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s)))) 4484 do_slab_free(s, slab, object, object, 1, addr); 4485 } 4486 4487 #ifdef CONFIG_MEMCG 4488 /* Do not inline the rare memcg charging failed path into the allocation path */ 4489 static noinline 4490 void memcg_alloc_abort_single(struct kmem_cache *s, void *object) 4491 { 4492 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s)))) 4493 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_); 4494 } 4495 #endif 4496 4497 static __fastpath_inline 4498 void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, 4499 void *tail, void **p, int cnt, unsigned long addr) 4500 { 4501 memcg_slab_free_hook(s, slab, p, cnt); 4502 alloc_tagging_slab_free_hook(s, slab, p, cnt); 4503 /* 4504 * With KASAN enabled slab_free_freelist_hook modifies the freelist 4505 * to remove objects, whose reuse must be delayed. 4506 */ 4507 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt))) 4508 do_slab_free(s, slab, head, tail, cnt, addr); 4509 } 4510 4511 #ifdef CONFIG_KASAN_GENERIC 4512 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr) 4513 { 4514 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr); 4515 } 4516 #endif 4517 4518 static inline struct kmem_cache *virt_to_cache(const void *obj) 4519 { 4520 struct slab *slab; 4521 4522 slab = virt_to_slab(obj); 4523 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__)) 4524 return NULL; 4525 return slab->slab_cache; 4526 } 4527 4528 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) 4529 { 4530 struct kmem_cache *cachep; 4531 4532 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && 4533 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) 4534 return s; 4535 4536 cachep = virt_to_cache(x); 4537 if (WARN(cachep && cachep != s, 4538 "%s: Wrong slab cache. 
%s but object is from %s\n", 4539 __func__, s->name, cachep->name)) 4540 print_tracking(cachep, x); 4541 return cachep; 4542 } 4543 4544 /** 4545 * kmem_cache_free - Deallocate an object 4546 * @s: The cache the allocation was from. 4547 * @x: The previously allocated object. 4548 * 4549 * Free an object which was previously allocated from this 4550 * cache. 4551 */ 4552 void kmem_cache_free(struct kmem_cache *s, void *x) 4553 { 4554 s = cache_from_obj(s, x); 4555 if (!s) 4556 return; 4557 trace_kmem_cache_free(_RET_IP_, x, s); 4558 slab_free(s, virt_to_slab(x), x, _RET_IP_); 4559 } 4560 EXPORT_SYMBOL(kmem_cache_free); 4561 4562 static void free_large_kmalloc(struct folio *folio, void *object) 4563 { 4564 unsigned int order = folio_order(folio); 4565 4566 if (WARN_ON_ONCE(order == 0)) 4567 pr_warn_once("object pointer: 0x%p\n", object); 4568 4569 kmemleak_free(object); 4570 kasan_kfree_large(object); 4571 kmsan_kfree_large(object); 4572 4573 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, 4574 -(PAGE_SIZE << order)); 4575 folio_put(folio); 4576 } 4577 4578 /** 4579 * kfree - free previously allocated memory 4580 * @object: pointer returned by kmalloc() or kmem_cache_alloc() 4581 * 4582 * If @object is NULL, no operation is performed. 4583 */ 4584 void kfree(const void *object) 4585 { 4586 struct folio *folio; 4587 struct slab *slab; 4588 struct kmem_cache *s; 4589 void *x = (void *)object; 4590 4591 trace_kfree(_RET_IP_, object); 4592 4593 if (unlikely(ZERO_OR_NULL_PTR(object))) 4594 return; 4595 4596 folio = virt_to_folio(object); 4597 if (unlikely(!folio_test_slab(folio))) { 4598 free_large_kmalloc(folio, (void *)object); 4599 return; 4600 } 4601 4602 slab = folio_slab(folio); 4603 s = slab->slab_cache; 4604 slab_free(s, slab, x, _RET_IP_); 4605 } 4606 EXPORT_SYMBOL(kfree); 4607 4608 struct detached_freelist { 4609 struct slab *slab; 4610 void *tail; 4611 void *freelist; 4612 int cnt; 4613 struct kmem_cache *s; 4614 }; 4615 4616 /* 4617 * This function progressively scans the array of free objects (with 4618 * a limited look ahead) and extracts objects belonging to the same 4619 * slab. It builds a detached freelist directly within the given 4620 * slab/objects. This can happen without any need for 4621 * synchronization, because the objects are owned by the running process. 4622 * The freelist is built up as a singly linked list in the objects. 4623 * The idea is that this detached freelist can then be bulk 4624 * transferred to the real freelist(s), requiring only a single 4625 * synchronization primitive. Look ahead in the array is limited 4626 * for performance reasons.
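 *
 * Illustrative example (hypothetical objects): for p = {B1, A1, A2, A3}
 * with the A* in one slab, a first call starts at A3 and builds the
 * detached list A1->A2->A3 (head A1, tail A3, df->cnt == 3), leaving B1
 * in the array; a second call then handles B1, so only two synchronized
 * freelist updates are needed for the four objects.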
4627 */ 4628 static inline 4629 int build_detached_freelist(struct kmem_cache *s, size_t size, 4630 void **p, struct detached_freelist *df) 4631 { 4632 int lookahead = 3; 4633 void *object; 4634 struct folio *folio; 4635 size_t same; 4636 4637 object = p[--size]; 4638 folio = virt_to_folio(object); 4639 if (!s) { 4640 /* Handle kmalloc'ed objects */ 4641 if (unlikely(!folio_test_slab(folio))) { 4642 free_large_kmalloc(folio, object); 4643 df->slab = NULL; 4644 return size; 4645 } 4646 /* Derive kmem_cache from object */ 4647 df->slab = folio_slab(folio); 4648 df->s = df->slab->slab_cache; 4649 } else { 4650 df->slab = folio_slab(folio); 4651 df->s = cache_from_obj(s, object); /* Support for memcg */ 4652 } 4653 4654 /* Start new detached freelist */ 4655 df->tail = object; 4656 df->freelist = object; 4657 df->cnt = 1; 4658 4659 if (is_kfence_address(object)) 4660 return size; 4661 4662 set_freepointer(df->s, object, NULL); 4663 4664 same = size; 4665 while (size) { 4666 object = p[--size]; 4667 /* df->slab is always set at this point */ 4668 if (df->slab == virt_to_slab(object)) { 4669 /* Opportunistically build the freelist */ 4670 set_freepointer(df->s, object, df->freelist); 4671 df->freelist = object; 4672 df->cnt++; 4673 same--; 4674 if (size != same) 4675 swap(p[size], p[same]); 4676 continue; 4677 } 4678 4679 /* Limit look ahead search */ 4680 if (!--lookahead) 4681 break; 4682 } 4683 4684 return same; 4685 } 4686 4687 /* 4688 * Internal bulk free of objects that were not initialised by the post alloc 4689 * hooks and thus should not be processed by the free hooks. 4690 */ 4691 static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4692 { 4693 if (!size) 4694 return; 4695 4696 do { 4697 struct detached_freelist df; 4698 4699 size = build_detached_freelist(s, size, p, &df); 4700 if (!df.slab) 4701 continue; 4702 4703 if (kfence_free(df.freelist)) 4704 continue; 4705 4706 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, 4707 _RET_IP_); 4708 } while (likely(size)); 4709 } 4710 4711 /* Note that interrupts must be enabled when calling this function. */ 4712 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) 4713 { 4714 if (!size) 4715 return; 4716 4717 do { 4718 struct detached_freelist df; 4719 4720 size = build_detached_freelist(s, size, p, &df); 4721 if (!df.slab) 4722 continue; 4723 4724 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size], 4725 df.cnt, _RET_IP_); 4726 } while (likely(size)); 4727 } 4728 EXPORT_SYMBOL(kmem_cache_free_bulk); 4729 4730 #ifndef CONFIG_SLUB_TINY 4731 static inline 4732 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 4733 void **p) 4734 { 4735 struct kmem_cache_cpu *c; 4736 unsigned long irqflags; 4737 int i; 4738 4739 /* 4740 * Drain objects in the per cpu slab, while disabling local 4741 * IRQs, which protects against preemption and interrupt 4742 * handlers invoking the normal fastpath. 4743 */ 4744 c = slub_get_cpu_ptr(s->cpu_slab); 4745 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4746 4747 for (i = 0; i < size; i++) { 4748 void *object = kfence_alloc(s, s->object_size, flags); 4749 4750 if (unlikely(object)) { 4751 p[i] = object; 4752 continue; 4753 } 4754 4755 object = c->freelist; 4756 if (unlikely(!object)) { 4757 /* 4758 * We may have removed an object from c->freelist using 4759 * the fastpath in the previous iteration; in that case, 4760 * c->tid has not been bumped yet.
Since ___slab_alloc() may reenable interrupts while 4762 * allocating memory, we should bump c->tid now. 4763 */ 4764 c->tid = next_tid(c->tid); 4765 4766 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4767 4768 /* 4769 * Invoking the slow path likely has the side effect 4770 * of re-populating the per CPU c->freelist 4771 */ 4772 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, 4773 _RET_IP_, c, s->object_size); 4774 if (unlikely(!p[i])) 4775 goto error; 4776 4777 c = this_cpu_ptr(s->cpu_slab); 4778 maybe_wipe_obj_freeptr(s, p[i]); 4779 4780 local_lock_irqsave(&s->cpu_slab->lock, irqflags); 4781 4782 continue; /* goto for-loop */ 4783 } 4784 c->freelist = get_freepointer(s, object); 4785 p[i] = object; 4786 maybe_wipe_obj_freeptr(s, p[i]); 4787 stat(s, ALLOC_FASTPATH); 4788 } 4789 c->tid = next_tid(c->tid); 4790 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); 4791 slub_put_cpu_ptr(s->cpu_slab); 4792 4793 return i; 4794 4795 error: 4796 slub_put_cpu_ptr(s->cpu_slab); 4797 __kmem_cache_free_bulk(s, i, p); 4798 return 0; 4799 4800 } 4801 #else /* CONFIG_SLUB_TINY */ 4802 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, 4803 size_t size, void **p) 4804 { 4805 int i; 4806 4807 for (i = 0; i < size; i++) { 4808 void *object = kfence_alloc(s, s->object_size, flags); 4809 4810 if (unlikely(object)) { 4811 p[i] = object; 4812 continue; 4813 } 4814 4815 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, 4816 _RET_IP_, s->object_size); 4817 if (unlikely(!p[i])) 4818 goto error; 4819 4820 maybe_wipe_obj_freeptr(s, p[i]); 4821 } 4822 4823 return i; 4824 4825 error: 4826 __kmem_cache_free_bulk(s, i, p); 4827 return 0; 4828 } 4829 #endif /* CONFIG_SLUB_TINY */ 4830 4831 /* Note that interrupts must be enabled when calling this function. */ 4832 int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, 4833 void **p) 4834 { 4835 int i; 4836 4837 if (!size) 4838 return 0; 4839 4840 s = slab_pre_alloc_hook(s, flags); 4841 if (unlikely(!s)) 4842 return 0; 4843 4844 i = __kmem_cache_alloc_bulk(s, flags, size, p); 4845 if (unlikely(i == 0)) 4846 return 0; 4847 4848 /* 4849 * memcg and kmem_cache debug support and memory initialization. 4850 * Done outside of the IRQ disabled fastpath loop. 4851 */ 4852 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p, 4853 slab_want_init_on_alloc(flags, s), s->object_size))) { 4854 return 0; 4855 } 4856 return i; 4857 } 4858 EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof); 4859 4860 4861 /* 4862 * Object placement in a slab is made very easy because we always start at 4863 * offset 0. If we tune the size of the object to the alignment then we can 4864 * get the required alignment by putting one properly sized object after 4865 * another. 4866 * 4867 * Notice that the allocation order determines the sizes of the per cpu 4868 * caches. Each processor always has one slab available for allocations. 4869 * Increasing the allocation order reduces the number of times that slabs 4870 * must be moved on and off the partial lists and is therefore a factor in 4871 * locking overhead. 4872 */ 4873 4874 /* 4875 * Minimum / Maximum order of slab pages. This influences locking overhead 4876 * and slab fragmentation. A higher order reduces the number of partial slabs 4877 * and increases the number of allocations possible without having to 4878 * take the list_lock. 4879 */ 4880 static unsigned int slub_min_order; 4881 static unsigned int slub_max_order = 4882 IS_ENABLED(CONFIG_SLUB_TINY) ?
1 : PAGE_ALLOC_COSTLY_ORDER; 4883 static unsigned int slub_min_objects; 4884 4885 /* 4886 * Calculate the order of allocation given a slab object size. 4887 * 4888 * The order of allocation has significant impact on performance and other 4889 * system components. Generally order 0 allocations should be preferred since 4890 * order 0 does not cause fragmentation in the page allocator. Larger objects 4891 * can be problematic to put into order 0 slabs because there may be too much 4892 * unused space left. We go to a higher order if more than 1/16th of the slab 4893 * would be wasted. 4894 * 4895 * In order to reach satisfactory performance we must ensure that a minimum 4896 * number of objects is in one slab. Otherwise we may generate too much 4897 * activity on the partial lists which requires taking the list_lock. This is 4898 * less of a concern for large slabs, though, which are rarely used. 4899 * 4900 * slab_max_order specifies the order at which we stop considering the 4901 * number of objects in a slab as critical. If we reach slab_max_order then 4902 * we try to keep the page order as low as possible. So we accept more waste 4903 * of space in favor of a small page order. 4904 * 4905 * Higher order allocations also allow the placement of more objects in a 4906 * slab and thereby reduce object handling overhead. If the user has 4907 * requested a higher minimum order then we start with that one instead of 4908 * the smallest order which will fit the object. 4909 */ 4910 static inline unsigned int calc_slab_order(unsigned int size, 4911 unsigned int min_order, unsigned int max_order, 4912 unsigned int fract_leftover) 4913 { 4914 unsigned int order; 4915 4916 for (order = min_order; order <= max_order; order++) { 4917 4918 unsigned int slab_size = (unsigned int)PAGE_SIZE << order; 4919 unsigned int rem; 4920 4921 rem = slab_size % size; 4922 4923 if (rem <= slab_size / fract_leftover) 4924 break; 4925 } 4926 4927 return order; 4928 } 4929 4930 static inline int calculate_order(unsigned int size) 4931 { 4932 unsigned int order; 4933 unsigned int min_objects; 4934 unsigned int max_objects; 4935 unsigned int min_order; 4936 4937 min_objects = slub_min_objects; 4938 if (!min_objects) { 4939 /* 4940 * Some architectures will only update present cpus when 4941 * onlining them, so don't trust the number if it's just 1. But 4942 * we also don't want to use nr_cpu_ids always, as on some other 4943 * architectures, there can be many possible cpus, but never 4944 * onlined. Here we compromise between trying to avoid too high 4945 * order on systems that appear larger than they are, and too 4946 * low order on systems that appear smaller than they are. 4947 */ 4948 unsigned int nr_cpus = num_present_cpus(); 4949 if (nr_cpus <= 1) 4950 nr_cpus = nr_cpu_ids; 4951 min_objects = 4 * (fls(nr_cpus) + 1); 4952 } 4953 /* min_objects can't be 0 because get_order(0) is undefined */ 4954 max_objects = max(order_objects(slub_max_order, size), 1U); 4955 min_objects = min(min_objects, max_objects); 4956 4957 min_order = max_t(unsigned int, slub_min_order, 4958 get_order(min_objects * size)); 4959 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE) 4960 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 4961 4962 /* 4963 * Attempt to find the best configuration for a slab. This works by first 4964 * attempting to generate a layout with the best possible configuration 4965 * and backing off gradually.
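 *
 * Worked example (hypothetical 700-byte object, 4K pages): an order-0
 * slab leaves 4096 % 700 = 596 unused bytes, more than 4096/16, while
 * an order-1 slab leaves 8192 % 700 = 492 bytes, under 8192/16, so the
 * first pass described below settles on order 1.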
4966 * 4967 * We start with accepting at most 1/16 waste and try to find the 4968 * smallest order from min_objects-derived/slab_min_order up to 4969 * slab_max_order that will satisfy the constraint. Note that increasing 4970 * the order can only result in the same or less fractional waste, not more. 4971 * 4972 * If that fails, we increase the acceptable fraction of waste and try 4973 * again. The last iteration with fraction of 1/2 would effectively 4974 * accept any waste and give us the order determined by min_objects, as 4975 * long as at least a single object fits within slab_max_order. 4976 */ 4977 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) { 4978 order = calc_slab_order(size, min_order, slub_max_order, 4979 fraction); 4980 if (order <= slub_max_order) 4981 return order; 4982 } 4983 4984 /* 4985 * Doh, this slab cannot be placed using slab_max_order. 4986 */ 4987 order = get_order(size); 4988 if (order <= MAX_PAGE_ORDER) 4989 return order; 4990 return -ENOSYS; 4991 } 4992 4993 static void 4994 init_kmem_cache_node(struct kmem_cache_node *n) 4995 { 4996 n->nr_partial = 0; 4997 spin_lock_init(&n->list_lock); 4998 INIT_LIST_HEAD(&n->partial); 4999 #ifdef CONFIG_SLUB_DEBUG 5000 atomic_long_set(&n->nr_slabs, 0); 5001 atomic_long_set(&n->total_objects, 0); 5002 INIT_LIST_HEAD(&n->full); 5003 #endif 5004 } 5005 5006 #ifndef CONFIG_SLUB_TINY 5007 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 5008 { 5009 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 5010 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH * 5011 sizeof(struct kmem_cache_cpu)); 5012 5013 /* 5014 * Must align to double word boundary for the double cmpxchg 5015 * instructions to work; see __pcpu_double_call_return_bool(). 5016 */ 5017 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 5018 2 * sizeof(void *)); 5019 5020 if (!s->cpu_slab) 5021 return 0; 5022 5023 init_kmem_cache_cpus(s); 5024 5025 return 1; 5026 } 5027 #else 5028 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) 5029 { 5030 return 1; 5031 } 5032 #endif /* CONFIG_SLUB_TINY */ 5033 5034 static struct kmem_cache *kmem_cache_node; 5035 5036 /* 5037 * No kmalloc_node yet so do it by hand. We know that this is the first 5038 * slab on the node for this slabcache. There are no concurrent accesses 5039 * possible. 5040 * 5041 * Note that this function only works on the kmem_cache_node 5042 * when allocating for the kmem_cache_node. This is used for bootstrapping 5043 * memory on a fresh node that has no slab structures yet.
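 * The struct kmem_cache_node itself is carved out of the first free
 * object of the slab allocated here; the slab, with its remaining
 * objects, then goes onto the new node's partial list.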
5044 */ 5045 static void early_kmem_cache_node_alloc(int node) 5046 { 5047 struct slab *slab; 5048 struct kmem_cache_node *n; 5049 5050 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); 5051 5052 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node); 5053 5054 BUG_ON(!slab); 5055 if (slab_nid(slab) != node) { 5056 pr_err("SLUB: Unable to allocate memory from node %d\n", node); 5057 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n"); 5058 } 5059 5060 n = slab->freelist; 5061 BUG_ON(!n); 5062 #ifdef CONFIG_SLUB_DEBUG 5063 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 5064 #endif 5065 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); 5066 slab->freelist = get_freepointer(kmem_cache_node, n); 5067 slab->inuse = 1; 5068 kmem_cache_node->node[node] = n; 5069 init_kmem_cache_node(n); 5070 inc_slabs_node(kmem_cache_node, node, slab->objects); 5071 5072 /* 5073 * No locks need to be taken here as it has just been 5074 * initialized and there is no concurrent access. 5075 */ 5076 __add_partial(n, slab, DEACTIVATE_TO_HEAD); 5077 } 5078 5079 static void free_kmem_cache_nodes(struct kmem_cache *s) 5080 { 5081 int node; 5082 struct kmem_cache_node *n; 5083 5084 for_each_kmem_cache_node(s, node, n) { 5085 s->node[node] = NULL; 5086 kmem_cache_free(kmem_cache_node, n); 5087 } 5088 } 5089 5090 void __kmem_cache_release(struct kmem_cache *s) 5091 { 5092 cache_random_seq_destroy(s); 5093 #ifndef CONFIG_SLUB_TINY 5094 free_percpu(s->cpu_slab); 5095 #endif 5096 free_kmem_cache_nodes(s); 5097 } 5098 5099 static int init_kmem_cache_nodes(struct kmem_cache *s) 5100 { 5101 int node; 5102 5103 for_each_node_mask(node, slab_nodes) { 5104 struct kmem_cache_node *n; 5105 5106 if (slab_state == DOWN) { 5107 early_kmem_cache_node_alloc(node); 5108 continue; 5109 } 5110 n = kmem_cache_alloc_node(kmem_cache_node, 5111 GFP_KERNEL, node); 5112 5113 if (!n) { 5114 free_kmem_cache_nodes(s); 5115 return 0; 5116 } 5117 5118 init_kmem_cache_node(n); 5119 s->node[node] = n; 5120 } 5121 return 1; 5122 } 5123 5124 static void set_cpu_partial(struct kmem_cache *s) 5125 { 5126 #ifdef CONFIG_SLUB_CPU_PARTIAL 5127 unsigned int nr_objects; 5128 5129 /* 5130 * cpu_partial determines the maximum number of objects kept in the 5131 * per cpu partial lists of a processor. 5132 * 5133 * Per cpu partial lists mainly contain slabs that just have one 5134 * object freed. If they are used for allocation then they can be 5135 * filled up again with minimal effort. The slab will never hit the 5136 * per node partial lists and therefore no locking will be required. 5137 * 5138 * For backwards compatibility reasons, this is expressed as a number 5139 * of objects, even though we now limit the maximum number of pages; see 5140 * slub_set_cpu_partial() 5141 */ 5142 if (!kmem_cache_has_cpu_partial(s)) 5143 nr_objects = 0; 5144 else if (s->size >= PAGE_SIZE) 5145 nr_objects = 6; 5146 else if (s->size >= 1024) 5147 nr_objects = 24; 5148 else if (s->size >= 256) 5149 nr_objects = 52; 5150 else 5151 nr_objects = 120; 5152 5153 slub_set_cpu_partial(s, nr_objects); 5154 #endif 5155 } 5156 5157 /* 5158 * calculate_sizes() determines the order and the distribution of data within 5159 * a slab object. 5160 */ 5161 static int calculate_sizes(struct kmem_cache *s) 5162 { 5163 slab_flags_t flags = s->flags; 5164 unsigned int size = s->object_size; 5165 unsigned int order; 5166 5167 /* 5168 * Round up object size to the next word boundary.
We can only 5169 * place the free pointer at word boundaries and this determines 5170 * the possible location of the free pointer. 5171 */ 5172 size = ALIGN(size, sizeof(void *)); 5173 5174 #ifdef CONFIG_SLUB_DEBUG 5175 /* 5176 * Determine if we can poison the object itself. If the user of 5177 * the slab may touch the object after free or before allocation 5178 * then we should never poison the object itself. 5179 */ 5180 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) && 5181 !s->ctor) 5182 s->flags |= __OBJECT_POISON; 5183 else 5184 s->flags &= ~__OBJECT_POISON; 5185 5186 5187 /* 5188 * If we are Redzoning then check if there is some space between the 5189 * end of the object and the free pointer. If not then add an 5190 * additional word to have some bytes to store Redzone information. 5191 */ 5192 if ((flags & SLAB_RED_ZONE) && size == s->object_size) 5193 size += sizeof(void *); 5194 #endif 5195 5196 /* 5197 * With that we have determined the number of bytes in actual use 5198 * by the object and redzoning. 5199 */ 5200 s->inuse = size; 5201 5202 if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor || 5203 ((flags & SLAB_RED_ZONE) && 5204 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) { 5205 /* 5206 * Relocate free pointer after the object if it is not 5207 * permitted to overwrite the first word of the object on 5208 * kmem_cache_free. 5209 * 5210 * This is the case if we do RCU, have a constructor or 5211 * destructor, are poisoning the objects, or are 5212 * redzoning an object smaller than sizeof(void *) or are 5213 * redzoning an object with slub_debug_orig_size() enabled, 5214 * in which case the right redzone may be extended. 5215 * 5216 * The assumption that s->offset >= s->inuse means free 5217 * pointer is outside of the object is used in the 5218 * freeptr_outside_object() function. If that is no 5219 * longer true, the function needs to be modified. 5220 */ 5221 s->offset = size; 5222 size += sizeof(void *); 5223 } else { 5224 /* 5225 * Store freelist pointer near middle of object to keep 5226 * it away from the edges of the object to avoid small 5227 * sized over/underflows from neighboring allocations. 5228 */ 5229 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); 5230 } 5231 5232 #ifdef CONFIG_SLUB_DEBUG 5233 if (flags & SLAB_STORE_USER) { 5234 /* 5235 * Need to store information about allocs and frees after 5236 * the object. 5237 */ 5238 size += 2 * sizeof(struct track); 5239 5240 /* Save the original kmalloc request size */ 5241 if (flags & SLAB_KMALLOC) 5242 size += sizeof(unsigned int); 5243 } 5244 #endif 5245 5246 kasan_cache_create(s, &size, &s->flags); 5247 #ifdef CONFIG_SLUB_DEBUG 5248 if (flags & SLAB_RED_ZONE) { 5249 /* 5250 * Add some empty padding so that we can catch 5251 * overwrites from earlier objects rather than let 5252 * tracking information or the free pointer be 5253 * corrupted if a user writes before the start 5254 * of the object. 5255 */ 5256 size += sizeof(void *); 5257 5258 s->red_left_pad = sizeof(void *); 5259 s->red_left_pad = ALIGN(s->red_left_pad, s->align); 5260 size += s->red_left_pad; 5261 } 5262 #endif 5263 5264 /* 5265 * SLUB stores one object immediately after another beginning from 5266 * offset 0. In order to align the objects we have to simply size 5267 * each object to conform to the alignment. 
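 *
 * Worked example (hypothetical debug cache): a 52-byte object is first
 * rounded up to 56 bytes, poisoning or RCU may move the free pointer
 * after the object (+8), SLAB_STORE_USER appends two struct track
 * records, red zoning adds padding on both sides, and the ALIGN()
 * below finally rounds the total up to s->align, yielding the
 * object-to-object stride s->size.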
5268 */ 5269 size = ALIGN(size, s->align); 5270 s->size = size; 5271 s->reciprocal_size = reciprocal_value(size); 5272 order = calculate_order(size); 5273 5274 if ((int)order < 0) 5275 return 0; 5276 5277 s->allocflags = __GFP_COMP; 5278 5279 if (s->flags & SLAB_CACHE_DMA) 5280 s->allocflags |= GFP_DMA; 5281 5282 if (s->flags & SLAB_CACHE_DMA32) 5283 s->allocflags |= GFP_DMA32; 5284 5285 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5286 s->allocflags |= __GFP_RECLAIMABLE; 5287 5288 /* 5289 * Determine the number of objects per slab 5290 */ 5291 s->oo = oo_make(order, size); 5292 s->min = oo_make(get_order(size), size); 5293 5294 return !!oo_objects(s->oo); 5295 } 5296 5297 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 5298 { 5299 s->flags = kmem_cache_flags(flags, s->name); 5300 #ifdef CONFIG_SLAB_FREELIST_HARDENED 5301 s->random = get_random_long(); 5302 #endif 5303 5304 if (!calculate_sizes(s)) 5305 goto error; 5306 if (disable_higher_order_debug) { 5307 /* 5308 * Disable debugging flags that store metadata if the min slab 5309 * order increased. 5310 */ 5311 if (get_order(s->size) > get_order(s->object_size)) { 5312 s->flags &= ~DEBUG_METADATA_FLAGS; 5313 s->offset = 0; 5314 if (!calculate_sizes(s)) 5315 goto error; 5316 } 5317 } 5318 5319 #ifdef system_has_freelist_aba 5320 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { 5321 /* Enable fast mode */ 5322 s->flags |= __CMPXCHG_DOUBLE; 5323 } 5324 #endif 5325 5326 /* 5327 * The larger the object size is, the more slabs we want on the partial 5328 * list to avoid pounding the page allocator excessively. 5329 */ 5330 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); 5331 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); 5332 5333 set_cpu_partial(s); 5334 5335 #ifdef CONFIG_NUMA 5336 s->remote_node_defrag_ratio = 1000; 5337 #endif 5338 5339 /* Initialize the pre-computed randomized freelist if slab is up */ 5340 if (slab_state >= UP) { 5341 if (init_cache_random_seq(s)) 5342 goto error; 5343 } 5344 5345 if (!init_kmem_cache_nodes(s)) 5346 goto error; 5347 5348 if (alloc_kmem_cache_cpus(s)) 5349 return 0; 5350 5351 error: 5352 __kmem_cache_release(s); 5353 return -EINVAL; 5354 } 5355 5356 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, 5357 const char *text) 5358 { 5359 #ifdef CONFIG_SLUB_DEBUG 5360 void *addr = slab_address(slab); 5361 void *p; 5362 5363 slab_err(s, slab, text, s->name); 5364 5365 spin_lock(&object_map_lock); 5366 __fill_map(object_map, s, slab); 5367 5368 for_each_object(p, s, addr, slab->objects) { 5369 5370 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { 5371 pr_err("Object 0x%p @offset=%tu\n", p, p - addr); 5372 print_tracking(s, p); 5373 } 5374 } 5375 spin_unlock(&object_map_lock); 5376 #endif 5377 } 5378 5379 /* 5380 * Attempt to free all partial slabs on a node. 5381 * This is called from __kmem_cache_shutdown(). We must take list_lock 5382 * because a sysfs file might still access the partial list after shutdown.
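 * Empty slabs are moved to a local discard list under the lock and are
 * only freed after it has been dropped.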
5383 */ 5384 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 5385 { 5386 LIST_HEAD(discard); 5387 struct slab *slab, *h; 5388 5389 BUG_ON(irqs_disabled()); 5390 spin_lock_irq(&n->list_lock); 5391 list_for_each_entry_safe(slab, h, &n->partial, slab_list) { 5392 if (!slab->inuse) { 5393 remove_partial(n, slab); 5394 list_add(&slab->slab_list, &discard); 5395 } else { 5396 list_slab_objects(s, slab, 5397 "Objects remaining in %s on __kmem_cache_shutdown()"); 5398 } 5399 } 5400 spin_unlock_irq(&n->list_lock); 5401 5402 list_for_each_entry_safe(slab, h, &discard, slab_list) 5403 discard_slab(s, slab); 5404 } 5405 5406 bool __kmem_cache_empty(struct kmem_cache *s) 5407 { 5408 int node; 5409 struct kmem_cache_node *n; 5410 5411 for_each_kmem_cache_node(s, node, n) 5412 if (n->nr_partial || node_nr_slabs(n)) 5413 return false; 5414 return true; 5415 } 5416 5417 /* 5418 * Release all resources used by a slab cache. 5419 */ 5420 int __kmem_cache_shutdown(struct kmem_cache *s) 5421 { 5422 int node; 5423 struct kmem_cache_node *n; 5424 5425 flush_all_cpus_locked(s); 5426 /* Attempt to free all objects */ 5427 for_each_kmem_cache_node(s, node, n) { 5428 free_partial(s, n); 5429 if (n->nr_partial || node_nr_slabs(n)) 5430 return 1; 5431 } 5432 return 0; 5433 } 5434 5435 #ifdef CONFIG_PRINTK 5436 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) 5437 { 5438 void *base; 5439 int __maybe_unused i; 5440 unsigned int objnr; 5441 void *objp; 5442 void *objp0; 5443 struct kmem_cache *s = slab->slab_cache; 5444 struct track __maybe_unused *trackp; 5445 5446 kpp->kp_ptr = object; 5447 kpp->kp_slab = slab; 5448 kpp->kp_slab_cache = s; 5449 base = slab_address(slab); 5450 objp0 = kasan_reset_tag(object); 5451 #ifdef CONFIG_SLUB_DEBUG 5452 objp = restore_red_left(s, objp0); 5453 #else 5454 objp = objp0; 5455 #endif 5456 objnr = obj_to_index(s, slab, objp); 5457 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); 5458 objp = base + s->size * objnr; 5459 kpp->kp_objp = objp; 5460 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size 5461 || (objp - base) % s->size) || 5462 !(s->flags & SLAB_STORE_USER)) 5463 return; 5464 #ifdef CONFIG_SLUB_DEBUG 5465 objp = fixup_red_left(s, objp); 5466 trackp = get_track(s, objp, TRACK_ALLOC); 5467 kpp->kp_ret = (void *)trackp->addr; 5468 #ifdef CONFIG_STACKDEPOT 5469 { 5470 depot_stack_handle_t handle; 5471 unsigned long *entries; 5472 unsigned int nr_entries; 5473 5474 handle = READ_ONCE(trackp->handle); 5475 if (handle) { 5476 nr_entries = stack_depot_fetch(handle, &entries); 5477 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5478 kpp->kp_stack[i] = (void *)entries[i]; 5479 } 5480 5481 trackp = get_track(s, objp, TRACK_FREE); 5482 handle = READ_ONCE(trackp->handle); 5483 if (handle) { 5484 nr_entries = stack_depot_fetch(handle, &entries); 5485 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++) 5486 kpp->kp_free_stack[i] = (void *)entries[i]; 5487 } 5488 } 5489 #endif 5490 #endif 5491 } 5492 #endif 5493 5494 /******************************************************************** 5495 * Kmalloc subsystem 5496 *******************************************************************/ 5497 5498 static int __init setup_slub_min_order(char *str) 5499 { 5500 get_option(&str, (int *)&slub_min_order); 5501 5502 if (slub_min_order > slub_max_order) 5503 slub_max_order = slub_min_order; 5504 5505 return 1; 5506 } 5507 5508 __setup("slab_min_order=", setup_slub_min_order); 5509 
__setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0); 5510 5511 5512 static int __init setup_slub_max_order(char *str) 5513 { 5514 get_option(&str, (int *)&slub_max_order); 5515 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER); 5516 5517 if (slub_min_order > slub_max_order) 5518 slub_min_order = slub_max_order; 5519 5520 return 1; 5521 } 5522 5523 __setup("slab_max_order=", setup_slub_max_order); 5524 __setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0); 5525 5526 static int __init setup_slub_min_objects(char *str) 5527 { 5528 get_option(&str, (int *)&slub_min_objects); 5529 5530 return 1; 5531 } 5532 5533 __setup("slab_min_objects=", setup_slub_min_objects); 5534 __setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0); 5535 5536 #ifdef CONFIG_HARDENED_USERCOPY 5537 /* 5538 * Rejects incorrectly sized objects and objects that are to be copied 5539 * to/from userspace but do not fall entirely within the containing slab 5540 * cache's usercopy region. 5541 * 5542 * Returns NULL if check passes, otherwise const char * to name of cache 5543 * to indicate an error. 5544 */ 5545 void __check_heap_object(const void *ptr, unsigned long n, 5546 const struct slab *slab, bool to_user) 5547 { 5548 struct kmem_cache *s; 5549 unsigned int offset; 5550 bool is_kfence = is_kfence_address(ptr); 5551 5552 ptr = kasan_reset_tag(ptr); 5553 5554 /* Find object and usable object size. */ 5555 s = slab->slab_cache; 5556 5557 /* Reject impossible pointers. */ 5558 if (ptr < slab_address(slab)) 5559 usercopy_abort("SLUB object not in SLUB page?!", NULL, 5560 to_user, 0, n); 5561 5562 /* Find offset within object. */ 5563 if (is_kfence) 5564 offset = ptr - kfence_object_start(ptr); 5565 else 5566 offset = (ptr - slab_address(slab)) % s->size; 5567 5568 /* Adjust for redzone and reject if within the redzone. */ 5569 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { 5570 if (offset < s->red_left_pad) 5571 usercopy_abort("SLUB object in left red zone", 5572 s->name, to_user, offset, n); 5573 offset -= s->red_left_pad; 5574 } 5575 5576 /* Allow address range falling entirely within usercopy region. */ 5577 if (offset >= s->useroffset && 5578 offset - s->useroffset <= s->usersize && 5579 n <= s->useroffset - offset + s->usersize) 5580 return; 5581 5582 usercopy_abort("SLUB object", s->name, to_user, offset, n); 5583 } 5584 #endif /* CONFIG_HARDENED_USERCOPY */ 5585 5586 #define SHRINK_PROMOTE_MAX 32 5587 5588 /* 5589 * kmem_cache_shrink discards empty slabs and promotes the slabs filled 5590 * up most to the head of the partial lists. New allocations will then 5591 * fill those up and thus they can be removed from the partial lists. 5592 * 5593 * The slabs with the least items are placed last. This results in them 5594 * being allocated from last increasing the chance that the last objects 5595 * are freed in them. 5596 */ 5597 static int __kmem_cache_do_shrink(struct kmem_cache *s) 5598 { 5599 int node; 5600 int i; 5601 struct kmem_cache_node *n; 5602 struct slab *slab; 5603 struct slab *t; 5604 struct list_head discard; 5605 struct list_head promote[SHRINK_PROMOTE_MAX]; 5606 unsigned long flags; 5607 int ret = 0; 5608 5609 for_each_kmem_cache_node(s, node, n) { 5610 INIT_LIST_HEAD(&discard); 5611 for (i = 0; i < SHRINK_PROMOTE_MAX; i++) 5612 INIT_LIST_HEAD(promote + i); 5613 5614 spin_lock_irqsave(&n->list_lock, flags); 5615 5616 /* 5617 * Build lists of slabs to discard or promote. 
5618 * 5619 * Note that concurrent frees may occur while we hold the 5620 * list_lock. slab->inuse here is the upper limit. 5621 */ 5622 list_for_each_entry_safe(slab, t, &n->partial, slab_list) { 5623 int free = slab->objects - slab->inuse; 5624 5625 /* Do not reread slab->inuse */ 5626 barrier(); 5627 5628 /* We do not keep full slabs on the list */ 5629 BUG_ON(free <= 0); 5630 5631 if (free == slab->objects) { 5632 list_move(&slab->slab_list, &discard); 5633 slab_clear_node_partial(slab); 5634 n->nr_partial--; 5635 dec_slabs_node(s, node, slab->objects); 5636 } else if (free <= SHRINK_PROMOTE_MAX) 5637 list_move(&slab->slab_list, promote + free - 1); 5638 } 5639 5640 /* 5641 * Promote the slabs filled up most to the head of the 5642 * partial list. 5643 */ 5644 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) 5645 list_splice(promote + i, &n->partial); 5646 5647 spin_unlock_irqrestore(&n->list_lock, flags); 5648 5649 /* Release empty slabs */ 5650 list_for_each_entry_safe(slab, t, &discard, slab_list) 5651 free_slab(s, slab); 5652 5653 if (node_nr_slabs(n)) 5654 ret = 1; 5655 } 5656 5657 return ret; 5658 } 5659 5660 int __kmem_cache_shrink(struct kmem_cache *s) 5661 { 5662 flush_all(s); 5663 return __kmem_cache_do_shrink(s); 5664 } 5665 5666 static int slab_mem_going_offline_callback(void *arg) 5667 { 5668 struct kmem_cache *s; 5669 5670 mutex_lock(&slab_mutex); 5671 list_for_each_entry(s, &slab_caches, list) { 5672 flush_all_cpus_locked(s); 5673 __kmem_cache_do_shrink(s); 5674 } 5675 mutex_unlock(&slab_mutex); 5676 5677 return 0; 5678 } 5679 5680 static void slab_mem_offline_callback(void *arg) 5681 { 5682 struct memory_notify *marg = arg; 5683 int offline_node; 5684 5685 offline_node = marg->status_change_nid_normal; 5686 5687 /* 5688 * If the node still has available memory, we still need its 5689 * kmem_cache_node, so there is nothing to do. 5690 */ 5691 if (offline_node < 0) 5692 return; 5693 5694 mutex_lock(&slab_mutex); 5695 node_clear(offline_node, slab_nodes); 5696 /* 5697 * We no longer free kmem_cache_node structures here, as it would be 5698 * racy with all get_node() users, and infeasible to protect them with 5699 * slab_mutex. 5700 */ 5701 mutex_unlock(&slab_mutex); 5702 } 5703 5704 static int slab_mem_going_online_callback(void *arg) 5705 { 5706 struct kmem_cache_node *n; 5707 struct kmem_cache *s; 5708 struct memory_notify *marg = arg; 5709 int nid = marg->status_change_nid_normal; 5710 int ret = 0; 5711 5712 /* 5713 * If the node's memory is already available, then kmem_cache_node is 5714 * already created. Nothing to do. 5715 */ 5716 if (nid < 0) 5717 return 0; 5718 5719 /* 5720 * We are bringing a node online. No memory is available yet. We must 5721 * allocate a kmem_cache_node structure in order to bring the node 5722 * online. 5723 */ 5724 mutex_lock(&slab_mutex); 5725 list_for_each_entry(s, &slab_caches, list) { 5726 /* 5727 * The structure may already exist if the node was previously 5728 * onlined and offlined. 5729 */ 5730 if (get_node(s, nid)) 5731 continue; 5732 /* 5733 * XXX: kmem_cache_alloc_node will fall back to other nodes 5734 * since memory is not yet available from the node that 5735 * is brought up. 5736 */ 5737 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); 5738 if (!n) { 5739 ret = -ENOMEM; 5740 goto out; 5741 } 5742 init_kmem_cache_node(n); 5743 s->node[nid] = n; 5744 } 5745 /* 5746 * Any cache created after this point will also have kmem_cache_node 5747 * initialized for the new node.
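 * node_set() below publishes nid in slab_nodes, the mask that
 * init_kmem_cache_nodes() walks when setting up each new cache.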
5748 */ 5749 node_set(nid, slab_nodes); 5750 out: 5751 mutex_unlock(&slab_mutex); 5752 return ret; 5753 } 5754 5755 static int slab_memory_callback(struct notifier_block *self, 5756 unsigned long action, void *arg) 5757 { 5758 int ret = 0; 5759 5760 switch (action) { 5761 case MEM_GOING_ONLINE: 5762 ret = slab_mem_going_online_callback(arg); 5763 break; 5764 case MEM_GOING_OFFLINE: 5765 ret = slab_mem_going_offline_callback(arg); 5766 break; 5767 case MEM_OFFLINE: 5768 case MEM_CANCEL_ONLINE: 5769 slab_mem_offline_callback(arg); 5770 break; 5771 case MEM_ONLINE: 5772 case MEM_CANCEL_OFFLINE: 5773 break; 5774 } 5775 if (ret) 5776 ret = notifier_from_errno(ret); 5777 else 5778 ret = NOTIFY_OK; 5779 return ret; 5780 } 5781 5782 /******************************************************************** 5783 * Basic setup of slabs 5784 *******************************************************************/ 5785 5786 /* 5787 * Used for early kmem_cache structures that were allocated using 5788 * the page allocator. Allocate them properly, then fix up the pointers 5789 * that may be pointing to the wrong kmem_cache structure. 5790 */ 5791 5792 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) 5793 { 5794 int node; 5795 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 5796 struct kmem_cache_node *n; 5797 5798 memcpy(s, static_cache, kmem_cache->object_size); 5799 5800 /* 5801 * This runs very early, and only the boot processor is supposed to be 5802 * up. Even if that weren't true, IRQs are not up, so we couldn't fire 5803 * IPIs around. 5804 */ 5805 __flush_cpu_slab(s, smp_processor_id()); 5806 for_each_kmem_cache_node(s, node, n) { 5807 struct slab *p; 5808 5809 list_for_each_entry(p, &n->partial, slab_list) 5810 p->slab_cache = s; 5811 5812 #ifdef CONFIG_SLUB_DEBUG 5813 list_for_each_entry(p, &n->full, slab_list) 5814 p->slab_cache = s; 5815 #endif 5816 } 5817 list_add(&s->list, &slab_caches); 5818 return s; 5819 } 5820 5821 void __init kmem_cache_init(void) 5822 { 5823 static __initdata struct kmem_cache boot_kmem_cache, 5824 boot_kmem_cache_node; 5825 int node; 5826 5827 if (debug_guardpage_minorder()) 5828 slub_max_order = 0; 5829 5830 /* Print slub debugging pointers without hashing */ 5831 if (__slub_debug_enabled()) 5832 no_hash_pointers_enable(NULL); 5833 5834 kmem_cache_node = &boot_kmem_cache_node; 5835 kmem_cache = &boot_kmem_cache; 5836 5837 /* 5838 * Initialize the nodemask for which we will allocate per node 5839 * structures. We don't need to take slab_mutex here yet.
5840 */ 5841 for_each_node_state(node, N_NORMAL_MEMORY) 5842 node_set(node, slab_nodes); 5843 5844 create_boot_cache(kmem_cache_node, "kmem_cache_node", 5845 sizeof(struct kmem_cache_node), 5846 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 5847 5848 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); 5849 5850 /* Able to allocate the per node structures */ 5851 slab_state = PARTIAL; 5852 5853 create_boot_cache(kmem_cache, "kmem_cache", 5854 offsetof(struct kmem_cache, node) + 5855 nr_node_ids * sizeof(struct kmem_cache_node *), 5856 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); 5857 5858 kmem_cache = bootstrap(&boot_kmem_cache); 5859 kmem_cache_node = bootstrap(&boot_kmem_cache_node); 5860 5861 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 5862 setup_kmalloc_cache_index_table(); 5863 create_kmalloc_caches(); 5864 5865 /* Setup random freelists for each cache */ 5866 init_freelist_randomization(); 5867 5868 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL, 5869 slub_cpu_dead); 5870 5871 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n", 5872 cache_line_size(), 5873 slub_min_order, slub_max_order, slub_min_objects, 5874 nr_cpu_ids, nr_node_ids); 5875 } 5876 5877 void __init kmem_cache_init_late(void) 5878 { 5879 #ifndef CONFIG_SLUB_TINY 5880 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); 5881 WARN_ON(!flushwq); 5882 #endif 5883 } 5884 5885 struct kmem_cache * 5886 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, 5887 slab_flags_t flags, void (*ctor)(void *)) 5888 { 5889 struct kmem_cache *s; 5890 5891 s = find_mergeable(size, align, flags, name, ctor); 5892 if (s) { 5893 if (sysfs_slab_alias(s, name)) 5894 return NULL; 5895 5896 s->refcount++; 5897 5898 /* 5899 * Adjust the object sizes so that we clear 5900 * the complete object on kzalloc. 5901 */ 5902 s->object_size = max(s->object_size, size); 5903 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); 5904 } 5905 5906 return s; 5907 } 5908 5909 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 5910 { 5911 int err; 5912 5913 err = kmem_cache_open(s, flags); 5914 if (err) 5915 return err; 5916 5917 /* Mutex is not taken during early boot */ 5918 if (slab_state <= UP) 5919 return 0; 5920 5921 err = sysfs_slab_add(s); 5922 if (err) { 5923 __kmem_cache_release(s); 5924 return err; 5925 } 5926 5927 if (s->flags & SLAB_STORE_USER) 5928 debugfs_slab_add(s); 5929 5930 return 0; 5931 } 5932 5933 #ifdef SLAB_SUPPORTS_SYSFS 5934 static int count_inuse(struct slab *slab) 5935 { 5936 return slab->inuse; 5937 } 5938 5939 static int count_total(struct slab *slab) 5940 { 5941 return slab->objects; 5942 } 5943 #endif 5944 5945 #ifdef CONFIG_SLUB_DEBUG 5946 static void validate_slab(struct kmem_cache *s, struct slab *slab, 5947 unsigned long *obj_map) 5948 { 5949 void *p; 5950 void *addr = slab_address(slab); 5951 5952 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) 5953 return; 5954 5955 /* Now we know that a valid freelist exists */ 5956 __fill_map(obj_map, s, slab); 5957 for_each_object(p, s, addr, slab->objects) { 5958 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? 
5959 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; 5960 5961 if (!check_object(s, slab, p, val)) 5962 break; 5963 } 5964 } 5965 5966 static int validate_slab_node(struct kmem_cache *s, 5967 struct kmem_cache_node *n, unsigned long *obj_map) 5968 { 5969 unsigned long count = 0; 5970 struct slab *slab; 5971 unsigned long flags; 5972 5973 spin_lock_irqsave(&n->list_lock, flags); 5974 5975 list_for_each_entry(slab, &n->partial, slab_list) { 5976 validate_slab(s, slab, obj_map); 5977 count++; 5978 } 5979 if (count != n->nr_partial) { 5980 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n", 5981 s->name, count, n->nr_partial); 5982 slab_add_kunit_errors(); 5983 } 5984 5985 if (!(s->flags & SLAB_STORE_USER)) 5986 goto out; 5987 5988 list_for_each_entry(slab, &n->full, slab_list) { 5989 validate_slab(s, slab, obj_map); 5990 count++; 5991 } 5992 if (count != node_nr_slabs(n)) { 5993 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n", 5994 s->name, count, node_nr_slabs(n)); 5995 slab_add_kunit_errors(); 5996 } 5997 5998 out: 5999 spin_unlock_irqrestore(&n->list_lock, flags); 6000 return count; 6001 } 6002 6003 long validate_slab_cache(struct kmem_cache *s) 6004 { 6005 int node; 6006 unsigned long count = 0; 6007 struct kmem_cache_node *n; 6008 unsigned long *obj_map; 6009 6010 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 6011 if (!obj_map) 6012 return -ENOMEM; 6013 6014 flush_all(s); 6015 for_each_kmem_cache_node(s, node, n) 6016 count += validate_slab_node(s, n, obj_map); 6017 6018 bitmap_free(obj_map); 6019 6020 return count; 6021 } 6022 EXPORT_SYMBOL(validate_slab_cache); 6023 6024 #ifdef CONFIG_DEBUG_FS 6025 /* 6026 * Generate lists of code addresses where slabcache objects are allocated 6027 * and freed. 6028 */ 6029 6030 struct location { 6031 depot_stack_handle_t handle; 6032 unsigned long count; 6033 unsigned long addr; 6034 unsigned long waste; 6035 long long sum_time; 6036 long min_time; 6037 long max_time; 6038 long min_pid; 6039 long max_pid; 6040 DECLARE_BITMAP(cpus, NR_CPUS); 6041 nodemask_t nodes; 6042 }; 6043 6044 struct loc_track { 6045 unsigned long max; 6046 unsigned long count; 6047 struct location *loc; 6048 loff_t idx; 6049 }; 6050 6051 static struct dentry *slab_debugfs_root; 6052 6053 static void free_loc_track(struct loc_track *t) 6054 { 6055 if (t->max) 6056 free_pages((unsigned long)t->loc, 6057 get_order(sizeof(struct location) * t->max)); 6058 } 6059 6060 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) 6061 { 6062 struct location *l; 6063 int order; 6064 6065 order = get_order(sizeof(struct location) * max); 6066 6067 l = (void *)__get_free_pages(flags, order); 6068 if (!l) 6069 return 0; 6070 6071 if (t->count) { 6072 memcpy(l, t->loc, sizeof(struct location) * t->count); 6073 free_loc_track(t); 6074 } 6075 t->max = max; 6076 t->loc = l; 6077 return 1; 6078 } 6079 6080 static int add_location(struct loc_track *t, struct kmem_cache *s, 6081 const struct track *track, 6082 unsigned int orig_size) 6083 { 6084 long start, end, pos; 6085 struct location *l; 6086 unsigned long caddr, chandle, cwaste; 6087 unsigned long age = jiffies - track->when; 6088 depot_stack_handle_t handle = 0; 6089 unsigned int waste = s->object_size - orig_size; 6090 6091 #ifdef CONFIG_STACKDEPOT 6092 handle = READ_ONCE(track->handle); 6093 #endif 6094 start = -1; 6095 end = t->count; 6096 6097 for ( ; ; ) { 6098 pos = start + (end - start + 1) / 2; 6099 6100 /* 6101 * There is nothing at "end". 
If we end up there, 6102 * we need to insert before end. 6103 */ 6104 if (pos == end) 6105 break; 6106 6107 l = &t->loc[pos]; 6108 caddr = l->addr; 6109 chandle = l->handle; 6110 cwaste = l->waste; 6111 if ((track->addr == caddr) && (handle == chandle) && 6112 (waste == cwaste)) { 6113 6114 l->count++; 6115 if (track->when) { 6116 l->sum_time += age; 6117 if (age < l->min_time) 6118 l->min_time = age; 6119 if (age > l->max_time) 6120 l->max_time = age; 6121 6122 if (track->pid < l->min_pid) 6123 l->min_pid = track->pid; 6124 if (track->pid > l->max_pid) 6125 l->max_pid = track->pid; 6126 6127 cpumask_set_cpu(track->cpu, 6128 to_cpumask(l->cpus)); 6129 } 6130 node_set(page_to_nid(virt_to_page(track)), l->nodes); 6131 return 1; 6132 } 6133 6134 if (track->addr < caddr) 6135 end = pos; 6136 else if (track->addr == caddr && handle < chandle) 6137 end = pos; 6138 else if (track->addr == caddr && handle == chandle && 6139 waste < cwaste) 6140 end = pos; 6141 else 6142 start = pos; 6143 } 6144 6145 /* 6146 * Not found. Insert new tracking element. 6147 */ 6148 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) 6149 return 0; 6150 6151 l = t->loc + pos; 6152 if (pos < t->count) 6153 memmove(l + 1, l, 6154 (t->count - pos) * sizeof(struct location)); 6155 t->count++; 6156 l->count = 1; 6157 l->addr = track->addr; 6158 l->sum_time = age; 6159 l->min_time = age; 6160 l->max_time = age; 6161 l->min_pid = track->pid; 6162 l->max_pid = track->pid; 6163 l->handle = handle; 6164 l->waste = waste; 6165 cpumask_clear(to_cpumask(l->cpus)); 6166 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); 6167 nodes_clear(l->nodes); 6168 node_set(page_to_nid(virt_to_page(track)), l->nodes); 6169 return 1; 6170 } 6171 6172 static void process_slab(struct loc_track *t, struct kmem_cache *s, 6173 struct slab *slab, enum track_item alloc, 6174 unsigned long *obj_map) 6175 { 6176 void *addr = slab_address(slab); 6177 bool is_alloc = (alloc == TRACK_ALLOC); 6178 void *p; 6179 6180 __fill_map(obj_map, s, slab); 6181 6182 for_each_object(p, s, addr, slab->objects) 6183 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) 6184 add_location(t, s, get_track(s, p, alloc), 6185 is_alloc ?
get_orig_size(s, p) : 6186 s->object_size); 6187 } 6188 #endif /* CONFIG_DEBUG_FS */ 6189 #endif /* CONFIG_SLUB_DEBUG */ 6190 6191 #ifdef SLAB_SUPPORTS_SYSFS 6192 enum slab_stat_type { 6193 SL_ALL, /* All slabs */ 6194 SL_PARTIAL, /* Only partially allocated slabs */ 6195 SL_CPU, /* Only slabs used for cpu caches */ 6196 SL_OBJECTS, /* Determine allocated objects not slabs */ 6197 SL_TOTAL /* Determine object capacity not slabs */ 6198 }; 6199 6200 #define SO_ALL (1 << SL_ALL) 6201 #define SO_PARTIAL (1 << SL_PARTIAL) 6202 #define SO_CPU (1 << SL_CPU) 6203 #define SO_OBJECTS (1 << SL_OBJECTS) 6204 #define SO_TOTAL (1 << SL_TOTAL) 6205 6206 static ssize_t show_slab_objects(struct kmem_cache *s, 6207 char *buf, unsigned long flags) 6208 { 6209 unsigned long total = 0; 6210 int node; 6211 int x; 6212 unsigned long *nodes; 6213 int len = 0; 6214 6215 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); 6216 if (!nodes) 6217 return -ENOMEM; 6218 6219 if (flags & SO_CPU) { 6220 int cpu; 6221 6222 for_each_possible_cpu(cpu) { 6223 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, 6224 cpu); 6225 int node; 6226 struct slab *slab; 6227 6228 slab = READ_ONCE(c->slab); 6229 if (!slab) 6230 continue; 6231 6232 node = slab_nid(slab); 6233 if (flags & SO_TOTAL) 6234 x = slab->objects; 6235 else if (flags & SO_OBJECTS) 6236 x = slab->inuse; 6237 else 6238 x = 1; 6239 6240 total += x; 6241 nodes[node] += x; 6242 6243 #ifdef CONFIG_SLUB_CPU_PARTIAL 6244 slab = slub_percpu_partial_read_once(c); 6245 if (slab) { 6246 node = slab_nid(slab); 6247 if (flags & SO_TOTAL) 6248 WARN_ON_ONCE(1); 6249 else if (flags & SO_OBJECTS) 6250 WARN_ON_ONCE(1); 6251 else 6252 x = data_race(slab->slabs); 6253 total += x; 6254 nodes[node] += x; 6255 } 6256 #endif 6257 } 6258 } 6259 6260 /* 6261 * We cannot take "mem_hotplug_lock" here, since "kernfs_mutex" is 6262 * already held and taking it would conflict with the existing lock order: 6263 * 6264 * mem_hotplug_lock->slab_mutex->kernfs_mutex 6265 * 6266 * We don't really need mem_hotplug_lock (to hold off 6267 * slab_mem_going_offline_callback) here because slab's memory hot 6268 * unplug code doesn't destroy the kmem_cache->node[] data.
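 * (See slab_mem_offline_callback() above: it only clears the node from
 * slab_nodes and deliberately keeps the kmem_cache_node structures.)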
6269 */ 6270 6271 #ifdef CONFIG_SLUB_DEBUG 6272 if (flags & SO_ALL) { 6273 struct kmem_cache_node *n; 6274 6275 for_each_kmem_cache_node(s, node, n) { 6276 6277 if (flags & SO_TOTAL) 6278 x = node_nr_objs(n); 6279 else if (flags & SO_OBJECTS) 6280 x = node_nr_objs(n) - count_partial(n, count_free); 6281 else 6282 x = node_nr_slabs(n); 6283 total += x; 6284 nodes[node] += x; 6285 } 6286 6287 } else 6288 #endif 6289 if (flags & SO_PARTIAL) { 6290 struct kmem_cache_node *n; 6291 6292 for_each_kmem_cache_node(s, node, n) { 6293 if (flags & SO_TOTAL) 6294 x = count_partial(n, count_total); 6295 else if (flags & SO_OBJECTS) 6296 x = count_partial(n, count_inuse); 6297 else 6298 x = n->nr_partial; 6299 total += x; 6300 nodes[node] += x; 6301 } 6302 } 6303 6304 len += sysfs_emit_at(buf, len, "%lu", total); 6305 #ifdef CONFIG_NUMA 6306 for (node = 0; node < nr_node_ids; node++) { 6307 if (nodes[node]) 6308 len += sysfs_emit_at(buf, len, " N%d=%lu", 6309 node, nodes[node]); 6310 } 6311 #endif 6312 len += sysfs_emit_at(buf, len, "\n"); 6313 kfree(nodes); 6314 6315 return len; 6316 } 6317 6318 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr) 6319 #define to_slab(n) container_of(n, struct kmem_cache, kobj) 6320 6321 struct slab_attribute { 6322 struct attribute attr; 6323 ssize_t (*show)(struct kmem_cache *s, char *buf); 6324 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count); 6325 }; 6326 6327 #define SLAB_ATTR_RO(_name) \ 6328 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400) 6329 6330 #define SLAB_ATTR(_name) \ 6331 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600) 6332 6333 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) 6334 { 6335 return sysfs_emit(buf, "%u\n", s->size); 6336 } 6337 SLAB_ATTR_RO(slab_size); 6338 6339 static ssize_t align_show(struct kmem_cache *s, char *buf) 6340 { 6341 return sysfs_emit(buf, "%u\n", s->align); 6342 } 6343 SLAB_ATTR_RO(align); 6344 6345 static ssize_t object_size_show(struct kmem_cache *s, char *buf) 6346 { 6347 return sysfs_emit(buf, "%u\n", s->object_size); 6348 } 6349 SLAB_ATTR_RO(object_size); 6350 6351 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) 6352 { 6353 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); 6354 } 6355 SLAB_ATTR_RO(objs_per_slab); 6356 6357 static ssize_t order_show(struct kmem_cache *s, char *buf) 6358 { 6359 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); 6360 } 6361 SLAB_ATTR_RO(order); 6362 6363 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) 6364 { 6365 return sysfs_emit(buf, "%lu\n", s->min_partial); 6366 } 6367 6368 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, 6369 size_t length) 6370 { 6371 unsigned long min; 6372 int err; 6373 6374 err = kstrtoul(buf, 10, &min); 6375 if (err) 6376 return err; 6377 6378 s->min_partial = min; 6379 return length; 6380 } 6381 SLAB_ATTR(min_partial); 6382 6383 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) 6384 { 6385 unsigned int nr_partial = 0; 6386 #ifdef CONFIG_SLUB_CPU_PARTIAL 6387 nr_partial = s->cpu_partial; 6388 #endif 6389 6390 return sysfs_emit(buf, "%u\n", nr_partial); 6391 } 6392 6393 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, 6394 size_t length) 6395 { 6396 unsigned int objects; 6397 int err; 6398 6399 err = kstrtouint(buf, 10, &objects); 6400 if (err) 6401 return err; 6402 if (objects && !kmem_cache_has_cpu_partial(s)) 6403 return -EINVAL; 6404 6405 slub_set_cpu_partial(s, objects); 
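	/*
	 * Changing the limit does not trim percpu partial lists that are
	 * already populated, so flush them; they will refill under the
	 * new limit.
	 */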
6406 flush_all(s); 6407 return length; 6408 } 6409 SLAB_ATTR(cpu_partial); 6410 6411 static ssize_t ctor_show(struct kmem_cache *s, char *buf) 6412 { 6413 if (!s->ctor) 6414 return 0; 6415 return sysfs_emit(buf, "%pS\n", s->ctor); 6416 } 6417 SLAB_ATTR_RO(ctor); 6418 6419 static ssize_t aliases_show(struct kmem_cache *s, char *buf) 6420 { 6421 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); 6422 } 6423 SLAB_ATTR_RO(aliases); 6424 6425 static ssize_t partial_show(struct kmem_cache *s, char *buf) 6426 { 6427 return show_slab_objects(s, buf, SO_PARTIAL); 6428 } 6429 SLAB_ATTR_RO(partial); 6430 6431 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) 6432 { 6433 return show_slab_objects(s, buf, SO_CPU); 6434 } 6435 SLAB_ATTR_RO(cpu_slabs); 6436 6437 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) 6438 { 6439 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); 6440 } 6441 SLAB_ATTR_RO(objects_partial); 6442 6443 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) 6444 { 6445 int objects = 0; 6446 int slabs = 0; 6447 int cpu __maybe_unused; 6448 int len = 0; 6449 6450 #ifdef CONFIG_SLUB_CPU_PARTIAL 6451 for_each_online_cpu(cpu) { 6452 struct slab *slab; 6453 6454 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6455 6456 if (slab) 6457 slabs += data_race(slab->slabs); 6458 } 6459 #endif 6460 6461 /* Approximate half-full slabs, see slub_set_cpu_partial() */ 6462 objects = (slabs * oo_objects(s->oo)) / 2; 6463 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs); 6464 6465 #ifdef CONFIG_SLUB_CPU_PARTIAL 6466 for_each_online_cpu(cpu) { 6467 struct slab *slab; 6468 6469 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); 6470 if (slab) { 6471 slabs = data_race(slab->slabs); 6472 objects = (slabs * oo_objects(s->oo)) / 2; 6473 len += sysfs_emit_at(buf, len, " C%d=%d(%d)", 6474 cpu, objects, slabs); 6475 } 6476 } 6477 #endif 6478 len += sysfs_emit_at(buf, len, "\n"); 6479 6480 return len; 6481 } 6482 SLAB_ATTR_RO(slabs_cpu_partial); 6483 6484 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) 6485 { 6486 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); 6487 } 6488 SLAB_ATTR_RO(reclaim_account); 6489 6490 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) 6491 { 6492 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); 6493 } 6494 SLAB_ATTR_RO(hwcache_align); 6495 6496 #ifdef CONFIG_ZONE_DMA 6497 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) 6498 { 6499 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); 6500 } 6501 SLAB_ATTR_RO(cache_dma); 6502 #endif 6503 6504 #ifdef CONFIG_HARDENED_USERCOPY 6505 static ssize_t usersize_show(struct kmem_cache *s, char *buf) 6506 { 6507 return sysfs_emit(buf, "%u\n", s->usersize); 6508 } 6509 SLAB_ATTR_RO(usersize); 6510 #endif 6511 6512 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) 6513 { 6514 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); 6515 } 6516 SLAB_ATTR_RO(destroy_by_rcu); 6517 6518 #ifdef CONFIG_SLUB_DEBUG 6519 static ssize_t slabs_show(struct kmem_cache *s, char *buf) 6520 { 6521 return show_slab_objects(s, buf, SO_ALL); 6522 } 6523 SLAB_ATTR_RO(slabs); 6524 6525 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) 6526 { 6527 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); 6528 } 6529 SLAB_ATTR_RO(total_objects); 6530 6531 static ssize_t objects_show(struct kmem_cache *s, char *buf) 6532 { 6533 return 
show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); 6534 } 6535 SLAB_ATTR_RO(objects); 6536 6537 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) 6538 { 6539 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); 6540 } 6541 SLAB_ATTR_RO(sanity_checks); 6542 6543 static ssize_t trace_show(struct kmem_cache *s, char *buf) 6544 { 6545 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); 6546 } 6547 SLAB_ATTR_RO(trace); 6548 6549 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) 6550 { 6551 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); 6552 } 6553 6554 SLAB_ATTR_RO(red_zone); 6555 6556 static ssize_t poison_show(struct kmem_cache *s, char *buf) 6557 { 6558 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); 6559 } 6560 6561 SLAB_ATTR_RO(poison); 6562 6563 static ssize_t store_user_show(struct kmem_cache *s, char *buf) 6564 { 6565 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); 6566 } 6567 6568 SLAB_ATTR_RO(store_user); 6569 6570 static ssize_t validate_show(struct kmem_cache *s, char *buf) 6571 { 6572 return 0; 6573 } 6574 6575 static ssize_t validate_store(struct kmem_cache *s, 6576 const char *buf, size_t length) 6577 { 6578 int ret = -EINVAL; 6579 6580 if (buf[0] == '1' && kmem_cache_debug(s)) { 6581 ret = validate_slab_cache(s); 6582 if (ret >= 0) 6583 ret = length; 6584 } 6585 return ret; 6586 } 6587 SLAB_ATTR(validate); 6588 6589 #endif /* CONFIG_SLUB_DEBUG */ 6590 6591 #ifdef CONFIG_FAILSLAB 6592 static ssize_t failslab_show(struct kmem_cache *s, char *buf) 6593 { 6594 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); 6595 } 6596 6597 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, 6598 size_t length) 6599 { 6600 if (s->refcount > 1) 6601 return -EINVAL; 6602 6603 if (buf[0] == '1') 6604 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); 6605 else 6606 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); 6607 6608 return length; 6609 } 6610 SLAB_ATTR(failslab); 6611 #endif 6612 6613 static ssize_t shrink_show(struct kmem_cache *s, char *buf) 6614 { 6615 return 0; 6616 } 6617 6618 static ssize_t shrink_store(struct kmem_cache *s, 6619 const char *buf, size_t length) 6620 { 6621 if (buf[0] == '1') 6622 kmem_cache_shrink(s); 6623 else 6624 return -EINVAL; 6625 return length; 6626 } 6627 SLAB_ATTR(shrink); 6628 6629 #ifdef CONFIG_NUMA 6630 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) 6631 { 6632 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); 6633 } 6634 6635 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 6636 const char *buf, size_t length) 6637 { 6638 unsigned int ratio; 6639 int err; 6640 6641 err = kstrtouint(buf, 10, &ratio); 6642 if (err) 6643 return err; 6644 if (ratio > 100) 6645 return -ERANGE; 6646 6647 s->remote_node_defrag_ratio = ratio * 10; 6648 6649 return length; 6650 } 6651 SLAB_ATTR(remote_node_defrag_ratio); 6652 #endif 6653 6654 #ifdef CONFIG_SLUB_STATS 6655 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) 6656 { 6657 unsigned long sum = 0; 6658 int cpu; 6659 int len = 0; 6660 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL); 6661 6662 if (!data) 6663 return -ENOMEM; 6664 6665 for_each_online_cpu(cpu) { 6666 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; 6667 6668 data[cpu] = x; 6669 sum += x; 6670 } 6671 6672 len += sysfs_emit_at(buf, len, "%lu", sum); 6673 6674 #ifdef CONFIG_SMP 6675 for_each_online_cpu(cpu) { 6676 if (data[cpu]) 6677 len += 
sysfs_emit_at(buf, len, " C%d=%u", 6678 cpu, data[cpu]); 6679 } 6680 #endif 6681 kfree(data); 6682 len += sysfs_emit_at(buf, len, "\n"); 6683 6684 return len; 6685 } 6686 6687 static void clear_stat(struct kmem_cache *s, enum stat_item si) 6688 { 6689 int cpu; 6690 6691 for_each_online_cpu(cpu) 6692 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; 6693 } 6694 6695 #define STAT_ATTR(si, text) \ 6696 static ssize_t text##_show(struct kmem_cache *s, char *buf) \ 6697 { \ 6698 return show_stat(s, buf, si); \ 6699 } \ 6700 static ssize_t text##_store(struct kmem_cache *s, \ 6701 const char *buf, size_t length) \ 6702 { \ 6703 if (buf[0] != '0') \ 6704 return -EINVAL; \ 6705 clear_stat(s, si); \ 6706 return length; \ 6707 } \ 6708 SLAB_ATTR(text); \ 6709 6710 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath); 6711 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath); 6712 STAT_ATTR(FREE_FASTPATH, free_fastpath); 6713 STAT_ATTR(FREE_SLOWPATH, free_slowpath); 6714 STAT_ATTR(FREE_FROZEN, free_frozen); 6715 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial); 6716 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial); 6717 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial); 6718 STAT_ATTR(ALLOC_SLAB, alloc_slab); 6719 STAT_ATTR(ALLOC_REFILL, alloc_refill); 6720 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); 6721 STAT_ATTR(FREE_SLAB, free_slab); 6722 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush); 6723 STAT_ATTR(DEACTIVATE_FULL, deactivate_full); 6724 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty); 6725 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head); 6726 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail); 6727 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); 6728 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); 6729 STAT_ATTR(ORDER_FALLBACK, order_fallback); 6730 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail); 6731 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); 6732 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc); 6733 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); 6734 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node); 6735 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain); 6736 #endif /* CONFIG_SLUB_STATS */ 6737 6738 #ifdef CONFIG_KFENCE 6739 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) 6740 { 6741 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); 6742 } 6743 6744 static ssize_t skip_kfence_store(struct kmem_cache *s, 6745 const char *buf, size_t length) 6746 { 6747 int ret = length; 6748 6749 if (buf[0] == '0') 6750 s->flags &= ~SLAB_SKIP_KFENCE; 6751 else if (buf[0] == '1') 6752 s->flags |= SLAB_SKIP_KFENCE; 6753 else 6754 ret = -EINVAL; 6755 6756 return ret; 6757 } 6758 SLAB_ATTR(skip_kfence); 6759 #endif 6760 6761 static struct attribute *slab_attrs[] = { 6762 &slab_size_attr.attr, 6763 &object_size_attr.attr, 6764 &objs_per_slab_attr.attr, 6765 &order_attr.attr, 6766 &min_partial_attr.attr, 6767 &cpu_partial_attr.attr, 6768 &objects_partial_attr.attr, 6769 &partial_attr.attr, 6770 &cpu_slabs_attr.attr, 6771 &ctor_attr.attr, 6772 &aliases_attr.attr, 6773 &align_attr.attr, 6774 &hwcache_align_attr.attr, 6775 &reclaim_account_attr.attr, 6776 &destroy_by_rcu_attr.attr, 6777 &shrink_attr.attr, 6778 &slabs_cpu_partial_attr.attr, 6779 #ifdef CONFIG_SLUB_DEBUG 6780 &total_objects_attr.attr, 6781 &objects_attr.attr, 6782 &slabs_attr.attr, 6783 &sanity_checks_attr.attr, 6784 &trace_attr.attr, 6785 &red_zone_attr.attr, 6786 &poison_attr.attr, 6787 &store_user_attr.attr, 6788 &validate_attr.attr, 6789 #endif 6790 #ifdef CONFIG_ZONE_DMA 6791 &cache_dma_attr.attr, 6792 #endif
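	/*
	 * The conditional entries below are simply absent from sysfs when
	 * the corresponding config option is disabled.
	 */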
6793 #ifdef CONFIG_NUMA 6794 &remote_node_defrag_ratio_attr.attr, 6795 #endif 6796 #ifdef CONFIG_SLUB_STATS 6797 &alloc_fastpath_attr.attr, 6798 &alloc_slowpath_attr.attr, 6799 &free_fastpath_attr.attr, 6800 &free_slowpath_attr.attr, 6801 &free_frozen_attr.attr, 6802 &free_add_partial_attr.attr, 6803 &free_remove_partial_attr.attr, 6804 &alloc_from_partial_attr.attr, 6805 &alloc_slab_attr.attr, 6806 &alloc_refill_attr.attr, 6807 &alloc_node_mismatch_attr.attr, 6808 &free_slab_attr.attr, 6809 &cpuslab_flush_attr.attr, 6810 &deactivate_full_attr.attr, 6811 &deactivate_empty_attr.attr, 6812 &deactivate_to_head_attr.attr, 6813 &deactivate_to_tail_attr.attr, 6814 &deactivate_remote_frees_attr.attr, 6815 &deactivate_bypass_attr.attr, 6816 &order_fallback_attr.attr, 6817 &cmpxchg_double_fail_attr.attr, 6818 &cmpxchg_double_cpu_fail_attr.attr, 6819 &cpu_partial_alloc_attr.attr, 6820 &cpu_partial_free_attr.attr, 6821 &cpu_partial_node_attr.attr, 6822 &cpu_partial_drain_attr.attr, 6823 #endif 6824 #ifdef CONFIG_FAILSLAB 6825 &failslab_attr.attr, 6826 #endif 6827 #ifdef CONFIG_HARDENED_USERCOPY 6828 &usersize_attr.attr, 6829 #endif 6830 #ifdef CONFIG_KFENCE 6831 &skip_kfence_attr.attr, 6832 #endif 6833 6834 NULL 6835 }; 6836 6837 static const struct attribute_group slab_attr_group = { 6838 .attrs = slab_attrs, 6839 }; 6840 6841 static ssize_t slab_attr_show(struct kobject *kobj, 6842 struct attribute *attr, 6843 char *buf) 6844 { 6845 struct slab_attribute *attribute; 6846 struct kmem_cache *s; 6847 6848 attribute = to_slab_attr(attr); 6849 s = to_slab(kobj); 6850 6851 if (!attribute->show) 6852 return -EIO; 6853 6854 return attribute->show(s, buf); 6855 } 6856 6857 static ssize_t slab_attr_store(struct kobject *kobj, 6858 struct attribute *attr, 6859 const char *buf, size_t len) 6860 { 6861 struct slab_attribute *attribute; 6862 struct kmem_cache *s; 6863 6864 attribute = to_slab_attr(attr); 6865 s = to_slab(kobj); 6866 6867 if (!attribute->store) 6868 return -EIO; 6869 6870 return attribute->store(s, buf, len); 6871 } 6872 6873 static void kmem_cache_release(struct kobject *k) 6874 { 6875 slab_kmem_cache_release(to_slab(k)); 6876 } 6877 6878 static const struct sysfs_ops slab_sysfs_ops = { 6879 .show = slab_attr_show, 6880 .store = slab_attr_store, 6881 }; 6882 6883 static const struct kobj_type slab_ktype = { 6884 .sysfs_ops = &slab_sysfs_ops, 6885 .release = kmem_cache_release, 6886 }; 6887 6888 static struct kset *slab_kset; 6889 6890 static inline struct kset *cache_kset(struct kmem_cache *s) 6891 { 6892 return slab_kset; 6893 } 6894 6895 #define ID_STR_LENGTH 32 6896 6897 /* Create a unique string id for a slab cache: 6898 * 6899 * Format :[flags-]size 6900 */ 6901 static char *create_unique_id(struct kmem_cache *s) 6902 { 6903 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); 6904 char *p = name; 6905 6906 if (!name) 6907 return ERR_PTR(-ENOMEM); 6908 6909 *p++ = ':'; 6910 /* 6911 * First flags affecting slabcache operations. We will only 6912 * get here for aliasable slabs so we do not need to support 6913 * too many flags. The flags here must cover all flags that 6914 * are matched during merging to guarantee that the id is 6915 * unique. 
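 * For example (illustrative), a cache with SLAB_ACCOUNT set and an
 * s->size of 192 gets the id ":A-0000192".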
6916 */ 6917 if (s->flags & SLAB_CACHE_DMA) 6918 *p++ = 'd'; 6919 if (s->flags & SLAB_CACHE_DMA32) 6920 *p++ = 'D'; 6921 if (s->flags & SLAB_RECLAIM_ACCOUNT) 6922 *p++ = 'a'; 6923 if (s->flags & SLAB_CONSISTENCY_CHECKS) 6924 *p++ = 'F'; 6925 if (s->flags & SLAB_ACCOUNT) 6926 *p++ = 'A'; 6927 if (p != name + 1) 6928 *p++ = '-'; 6929 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); 6930 6931 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) { 6932 kfree(name); 6933 return ERR_PTR(-EINVAL); 6934 } 6935 kmsan_unpoison_memory(name, p - name); 6936 return name; 6937 } 6938 6939 static int sysfs_slab_add(struct kmem_cache *s) 6940 { 6941 int err; 6942 const char *name; 6943 struct kset *kset = cache_kset(s); 6944 int unmergeable = slab_unmergeable(s); 6945 6946 if (!unmergeable && disable_higher_order_debug && 6947 (slub_debug & DEBUG_METADATA_FLAGS)) 6948 unmergeable = 1; 6949 6950 if (unmergeable) { 6951 /* 6952 * Slabcache can never be merged so we can use the name proper. 6953 * This is typically the case for debug situations. In that 6954 * case we can catch duplicate names easily. 6955 */ 6956 sysfs_remove_link(&slab_kset->kobj, s->name); 6957 name = s->name; 6958 } else { 6959 /* 6960 * Create a unique name for the slab as a target 6961 * for the symlinks. 6962 */ 6963 name = create_unique_id(s); 6964 if (IS_ERR(name)) 6965 return PTR_ERR(name); 6966 } 6967 6968 s->kobj.kset = kset; 6969 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); 6970 if (err) 6971 goto out; 6972 6973 err = sysfs_create_group(&s->kobj, &slab_attr_group); 6974 if (err) 6975 goto out_del_kobj; 6976 6977 if (!unmergeable) { 6978 /* Setup first alias */ 6979 sysfs_slab_alias(s, s->name); 6980 } 6981 out: 6982 if (!unmergeable) 6983 kfree(name); 6984 return err; 6985 out_del_kobj: 6986 kobject_del(&s->kobj); 6987 goto out; 6988 } 6989 6990 void sysfs_slab_unlink(struct kmem_cache *s) 6991 { 6992 kobject_del(&s->kobj); 6993 } 6994 6995 void sysfs_slab_release(struct kmem_cache *s) 6996 { 6997 kobject_put(&s->kobj); 6998 } 6999 7000 /* 7001 * Need to buffer aliases during bootup until sysfs becomes 7002 * available lest we lose that information. 7003 */ 7004 struct saved_alias { 7005 struct kmem_cache *s; 7006 const char *name; 7007 struct saved_alias *next; 7008 }; 7009 7010 static struct saved_alias *alias_list; 7011 7012 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 7013 { 7014 struct saved_alias *al; 7015 7016 if (slab_state == FULL) { 7017 /* 7018 * If we have a leftover link then remove it. 
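 * (A stale link can be left over if a cache with this name existed
 * earlier and has since been destroyed.)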
7019 */ 7020 sysfs_remove_link(&slab_kset->kobj, name); 7021 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); 7022 } 7023 7024 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL); 7025 if (!al) 7026 return -ENOMEM; 7027 7028 al->s = s; 7029 al->name = name; 7030 al->next = alias_list; 7031 alias_list = al; 7032 kmsan_unpoison_memory(al, sizeof(*al)); 7033 return 0; 7034 } 7035 7036 static int __init slab_sysfs_init(void) 7037 { 7038 struct kmem_cache *s; 7039 int err; 7040 7041 mutex_lock(&slab_mutex); 7042 7043 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj); 7044 if (!slab_kset) { 7045 mutex_unlock(&slab_mutex); 7046 pr_err("Cannot register slab subsystem.\n"); 7047 return -ENOMEM; 7048 } 7049 7050 slab_state = FULL; 7051 7052 list_for_each_entry(s, &slab_caches, list) { 7053 err = sysfs_slab_add(s); 7054 if (err) 7055 pr_err("SLUB: Unable to add boot slab %s to sysfs\n", 7056 s->name); 7057 } 7058 7059 while (alias_list) { 7060 struct saved_alias *al = alias_list; 7061 7062 alias_list = alias_list->next; 7063 err = sysfs_slab_alias(al->s, al->name); 7064 if (err) 7065 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n", 7066 al->name); 7067 kfree(al); 7068 } 7069 7070 mutex_unlock(&slab_mutex); 7071 return 0; 7072 } 7073 late_initcall(slab_sysfs_init); 7074 #endif /* SLAB_SUPPORTS_SYSFS */ 7075 7076 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS) 7077 static int slab_debugfs_show(struct seq_file *seq, void *v) 7078 { 7079 struct loc_track *t = seq->private; 7080 struct location *l; 7081 unsigned long idx; 7082 7083 idx = (unsigned long) t->idx; 7084 if (idx < t->count) { 7085 l = &t->loc[idx]; 7086 7087 seq_printf(seq, "%7ld ", l->count); 7088 7089 if (l->addr) 7090 seq_printf(seq, "%pS", (void *)l->addr); 7091 else 7092 seq_puts(seq, "<not-available>"); 7093 7094 if (l->waste) 7095 seq_printf(seq, " waste=%lu/%lu", 7096 l->count * l->waste, l->waste); 7097 7098 if (l->sum_time != l->min_time) { 7099 seq_printf(seq, " age=%ld/%llu/%ld", 7100 l->min_time, div_u64(l->sum_time, l->count), 7101 l->max_time); 7102 } else 7103 seq_printf(seq, " age=%ld", l->min_time); 7104 7105 if (l->min_pid != l->max_pid) 7106 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid); 7107 else 7108 seq_printf(seq, " pid=%ld", 7109 l->min_pid); 7110 7111 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus))) 7112 seq_printf(seq, " cpus=%*pbl", 7113 cpumask_pr_args(to_cpumask(l->cpus))); 7114 7115 if (nr_online_nodes > 1 && !nodes_empty(l->nodes)) 7116 seq_printf(seq, " nodes=%*pbl", 7117 nodemask_pr_args(&l->nodes)); 7118 7119 #ifdef CONFIG_STACKDEPOT 7120 { 7121 depot_stack_handle_t handle; 7122 unsigned long *entries; 7123 unsigned int nr_entries, j; 7124 7125 handle = READ_ONCE(l->handle); 7126 if (handle) { 7127 nr_entries = stack_depot_fetch(handle, &entries); 7128 seq_puts(seq, "\n"); 7129 for (j = 0; j < nr_entries; j++) 7130 seq_printf(seq, " %pS\n", (void *)entries[j]); 7131 } 7132 } 7133 #endif 7134 seq_puts(seq, "\n"); 7135 } 7136 7137 if (!idx && !t->count) 7138 seq_puts(seq, "No data\n"); 7139 7140 return 0; 7141 } 7142 7143 static void slab_debugfs_stop(struct seq_file *seq, void *v) 7144 { 7145 } 7146 7147 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos) 7148 { 7149 struct loc_track *t = seq->private; 7150 7151 t->idx = ++(*ppos); 7152 if (*ppos <= t->count) 7153 return ppos; 7154 7155 return NULL; 7156 } 7157 7158 static int cmp_loc_by_count(const void *a, const void *b, const void *data) 7159 { 7160 struct location 
*loc1 = (struct location *)a; 7161 struct location *loc2 = (struct location *)b; 7162 7163 if (loc1->count > loc2->count) 7164 return -1; 7165 else 7166 return 1; 7167 } 7168 7169 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos) 7170 { 7171 struct loc_track *t = seq->private; 7172 7173 t->idx = *ppos; 7174 return ppos; 7175 } 7176 7177 static const struct seq_operations slab_debugfs_sops = { 7178 .start = slab_debugfs_start, 7179 .next = slab_debugfs_next, 7180 .stop = slab_debugfs_stop, 7181 .show = slab_debugfs_show, 7182 }; 7183 7184 static int slab_debug_trace_open(struct inode *inode, struct file *filep) 7185 { 7186 7187 struct kmem_cache_node *n; 7188 enum track_item alloc; 7189 int node; 7190 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops, 7191 sizeof(struct loc_track)); 7192 struct kmem_cache *s = file_inode(filep)->i_private; 7193 unsigned long *obj_map; 7194 7195 if (!t) 7196 return -ENOMEM; 7197 7198 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); 7199 if (!obj_map) { 7200 seq_release_private(inode, filep); 7201 return -ENOMEM; 7202 } 7203 7204 if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0) 7205 alloc = TRACK_ALLOC; 7206 else 7207 alloc = TRACK_FREE; 7208 7209 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) { 7210 bitmap_free(obj_map); 7211 seq_release_private(inode, filep); 7212 return -ENOMEM; 7213 } 7214 7215 for_each_kmem_cache_node(s, node, n) { 7216 unsigned long flags; 7217 struct slab *slab; 7218 7219 if (!node_nr_slabs(n)) 7220 continue; 7221 7222 spin_lock_irqsave(&n->list_lock, flags); 7223 list_for_each_entry(slab, &n->partial, slab_list) 7224 process_slab(t, s, slab, alloc, obj_map); 7225 list_for_each_entry(slab, &n->full, slab_list) 7226 process_slab(t, s, slab, alloc, obj_map); 7227 spin_unlock_irqrestore(&n->list_lock, flags); 7228 } 7229 7230 /* Sort locations by count */ 7231 sort_r(t->loc, t->count, sizeof(struct location), 7232 cmp_loc_by_count, NULL, NULL); 7233 7234 bitmap_free(obj_map); 7235 return 0; 7236 } 7237 7238 static int slab_debug_trace_release(struct inode *inode, struct file *file) 7239 { 7240 struct seq_file *seq = file->private_data; 7241 struct loc_track *t = seq->private; 7242 7243 free_loc_track(t); 7244 return seq_release_private(inode, file); 7245 } 7246 7247 static const struct file_operations slab_debugfs_fops = { 7248 .open = slab_debug_trace_open, 7249 .read = seq_read, 7250 .llseek = seq_lseek, 7251 .release = slab_debug_trace_release, 7252 }; 7253 7254 static void debugfs_slab_add(struct kmem_cache *s) 7255 { 7256 struct dentry *slab_cache_dir; 7257 7258 if (unlikely(!slab_debugfs_root)) 7259 return; 7260 7261 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); 7262 7263 debugfs_create_file("alloc_traces", 0400, 7264 slab_cache_dir, s, &slab_debugfs_fops); 7265 7266 debugfs_create_file("free_traces", 0400, 7267 slab_cache_dir, s, &slab_debugfs_fops); 7268 } 7269 7270 void debugfs_slab_release(struct kmem_cache *s) 7271 { 7272 debugfs_lookup_and_remove(s->name, slab_debugfs_root); 7273 } 7274 7275 static int __init slab_debugfs_init(void) 7276 { 7277 struct kmem_cache *s; 7278 7279 slab_debugfs_root = debugfs_create_dir("slab", NULL); 7280 7281 list_for_each_entry(s, &slab_caches, list) 7282 if (s->flags & SLAB_STORE_USER) 7283 debugfs_slab_add(s); 7284 7285 return 0; 7286 7287 } 7288 __initcall(slab_debugfs_init); 7289 #endif 7290 /* 7291 * The /proc/slabinfo ABI 7292 */ 7293 #ifdef CONFIG_SLUB_DEBUG 7294 void 
get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) 7295 { 7296 unsigned long nr_slabs = 0; 7297 unsigned long nr_objs = 0; 7298 unsigned long nr_free = 0; 7299 int node; 7300 struct kmem_cache_node *n; 7301 7302 for_each_kmem_cache_node(s, node, n) { 7303 nr_slabs += node_nr_slabs(n); 7304 nr_objs += node_nr_objs(n); 7305 nr_free += count_partial_free_approx(n); 7306 } 7307 7308 sinfo->active_objs = nr_objs - nr_free; 7309 sinfo->num_objs = nr_objs; 7310 sinfo->active_slabs = nr_slabs; 7311 sinfo->num_slabs = nr_slabs; 7312 sinfo->objects_per_slab = oo_objects(s->oo); 7313 sinfo->cache_order = oo_order(s->oo); 7314 } 7315 #endif /* CONFIG_SLUB_DEBUG */ 7316
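/*
 * Note: in the resulting /proc/slabinfo line, active_slabs always equals
 * num_slabs (both are set to nr_slabs above), and active_objs is an
 * estimate, since nr_free comes from count_partial_free_approx().
 */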