/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>

enum _slab_flag_bits {
	_SLAB_CONSISTENCY_CHECKS,
	_SLAB_RED_ZONE,
	_SLAB_POISON,
	_SLAB_KMALLOC,
	_SLAB_HWCACHE_ALIGN,
	_SLAB_CACHE_DMA,
	_SLAB_CACHE_DMA32,
	_SLAB_STORE_USER,
	_SLAB_PANIC,
	_SLAB_TYPESAFE_BY_RCU,
	_SLAB_TRACE,
#ifdef CONFIG_DEBUG_OBJECTS
	_SLAB_DEBUG_OBJECTS,
#endif
	_SLAB_NOLEAKTRACE,
	_SLAB_NO_MERGE,
#ifdef CONFIG_FAILSLAB
	_SLAB_FAILSLAB,
#endif
#ifdef CONFIG_MEMCG
	_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
	_SLAB_KASAN,
#endif
	_SLAB_NO_USER_FLAGS,
#ifdef CONFIG_KFENCE
	_SLAB_SKIP_KFENCE,
#endif
#ifndef CONFIG_SLUB_TINY
	_SLAB_RECLAIM_ACCOUNT,
#endif
	_SLAB_OBJECT_POISON,
	_SLAB_CMPXCHG_DOUBLE,
#ifdef CONFIG_SLAB_OBJ_EXT
	_SLAB_NO_OBJ_EXT,
#endif
	_SLAB_FLAGS_LAST_BIT
};

#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * begin:
 *  rcu_read_lock();
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) { // might fail for free objects
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above.  The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor().  Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)

/*
 * Prevent merging with compatible kmem caches. This flag should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance critical caches, should be very rare and consulted with slab
 *   maintainers, and not used together with CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
#else
#define SLAB_KASAN		__SLAB_FLAG_UNUSED
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
#else
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab created using create_boot_cache */
#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
#else
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
#endif

/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);
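
/*
 * Illustrative sketch (added for exposition, not part of the upstream
 * header): the ZERO_SIZE_PTR contract documented above in practice.
 * A zero-sized request is not an error; it yields a pointer that must
 * not be dereferenced but may be passed to kfree():
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// returns ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))		// true for NULL too
 *		;				// nothing to access
 *	kfree(p);				// no-op for ZERO_SIZE_PTR
 */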

/**
 * struct kmem_cache_args - Less common arguments for kmem_cache_create()
 *
 * Any uninitialized fields of the structure are interpreted as unused. The
 * exception is @freeptr_offset where %0 is a valid value, so
 * @use_freeptr_offset must be also set to %true in order to interpret the field
 * as used. For @useroffset %0 is also valid, but only with non-%0
 * @usersize.
 *
 * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
 * fields unused.
 */
struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset, when @usersize is non-%0
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free the object in
	 * the same state as after calling the constructor, or deal
	 * appropriately with any differences between a freshly constructed
	 * and a reallocated object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
};

struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags);
static inline struct kmem_cache *
__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		    slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align	= align,
		.ctor	= ctor,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/**
 * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
 * for copying to userspace.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects, or %NULL.
 *
 * This is a legacy wrapper; new code should use either KMEM_CACHE_USERCOPY()
 * if whitelisting a single field is sufficient, or kmem_cache_create() with
 * the necessary parameters passed via the args parameter (see
 * &struct kmem_cache_args)
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
static inline struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
			   unsigned int align, slab_flags_t flags,
			   unsigned int useroffset, unsigned int usersize,
			   void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align		= align,
		.ctor		= ctor,
		.useroffset	= useroffset,
		.usersize	= usersize,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/* If NULL is passed for @args, use this variant with default arguments. */
static inline struct kmem_cache *
__kmem_cache_default_args(const char *name, unsigned int size,
			  struct kmem_cache_args *args,
			  slab_flags_t flags)
{
	struct kmem_cache_args kmem_default_args = {};

	/* Make sure we don't get passed garbage. */
	if (WARN_ON_ONCE(args))
		return ERR_PTR(-EINVAL);

	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
}

/**
 * kmem_cache_create - Create a kmem cache.
 * @__name: A string which is used in /proc/slabinfo to identify this cache.
 * @__object_size: The size of objects to be created in this cache.
 * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
 *	    means defaults will be used for all the arguments.
 *
 * This is currently implemented as a macro using ``_Generic()`` to call
 * either the new variant of the function, or a legacy one.
 *
 * The new variant has 4 parameters:
 * ``kmem_cache_create(name, object_size, args, flags)``
 *
 * See __kmem_cache_create_args() which implements this.
 *
 * The legacy variant has 5 parameters:
 * ``kmem_cache_create(name, object_size, align, flags, ctor)``
 *
 * The align and ctor parameters map to the respective fields of
 * &struct kmem_cache_args
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
#define kmem_cache_create(__name, __object_size, __args, ...)		\
	_Generic((__args),						\
		struct kmem_cache_args *: __kmem_cache_create_args,	\
		void *: __kmem_cache_default_args,			\
		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)

void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
			&(struct kmem_cache_args) {			\
				.align	= __alignof__(struct __struct), \
			}, (__flags))

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
			&(struct kmem_cache_args) {			\
				.align		= __alignof__(struct __struct), \
				.useroffset	= offsetof(struct __struct, __field), \
				.usersize	= sizeof_field(struct __struct, __field), \
			}, (__flags))

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc_noprof(const void *objp, size_t new_size,
				    gfp_t flags) __realloc_size(2);
#define krealloc(...)			alloc_hooks(krealloc_noprof(__VA_ARGS__))

void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
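
/*
 * Usage sketch for the cache-creation API above (added for exposition;
 * struct foo, foo_cache and foo_init are illustrative names only):
 *
 *	struct foo {
 *		int refcnt;
 *		spinlock_t lock;
 *	};
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *		return foo_cache ? 0 : -ENOMEM;
 *	}
 *
 * Objects are then allocated with kmem_cache_zalloc(foo_cache,
 * GFP_KERNEL), released with kmem_cache_free(foo_cache, obj), and the
 * cache itself is torn down with kmem_cache_destroy(foo_cache).
 */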

DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);

#ifdef CONFIG_PRINTK
bool kmem_dump_obj(void *object);
#else
static inline bool kmem_dump_obj(void *object) { return false; }
#endif
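
/*
 * Sketch of the pattern recommended by the ksize() comment above
 * (illustrative, not part of the upstream header): round the request
 * up front with kmalloc_size_roundup() instead of querying ksize()
 * after the fact:
 *
 *	size_t want = 126;
 *	size_t full = kmalloc_size_roundup(want);	// e.g. 128
 *	char *buf = kmalloc(full, GFP_KERNEL);
 *
 * If buf is non-NULL, all 'full' bytes may be used without tripping
 * FORTIFY_SOURCE or UBSAN_BOUNDS checks.
 */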

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * byte sized index which can represent 2^8 entries, the size of the object
 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
 * If minimum size of kmalloc is less than 16, we use it as minimum object
 * size and give up to use byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR	0
#endif

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];

extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))
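
/*
 * Orientation sketch (added for exposition): with all related config
 * options enabled, kmalloc_type() below maps GFP bits to bucket types
 * roughly as follows:
 *
 *	GFP_KERNEL			-> KMALLOC_NORMAL (or a random copy)
 *	GFP_KERNEL | __GFP_ACCOUNT	-> KMALLOC_CGROUP
 *	GFP_KERNEL | __GFP_RECLAIMABLE	-> KMALLOC_RECLAIM
 *	GFP_DMA				-> KMALLOC_DMA (highest priority)
 */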

extern unsigned long random_kmalloc_seed;

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
		return KMALLOC_NORMAL;
#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime
 * optimized; typical usage is via kmalloc_index() and thus with a
 * compile-time constant argument. Callers where !size_is_constant should
 * only be in special cases where the overheads of __kmalloc_index() can
 * be tolerated.
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)

#include <linux/alloc_tag.h>

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
			      gfp_t flags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc(...)		alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))

void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
			    gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))

/**
 * kmem_cache_charge - memcg charge an already allocated slab memory
 * @objp: address of the slab object to memcg charge
 * @gfpflags: describe the allocation context
 *
 * kmem_cache_charge allows charging a slab object to the current memcg,
 * primarily in cases where charging at allocation time might not be possible
 * because the target memcg is not known (i.e. softirq context)
 *
 * The objp should be a pointer returned by the slab allocator functions like
 * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
 * behavior can be controlled through gfpflags parameter, which affects how the
 * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
 * that overcharging is requested instead of failure, but is not applied for the
 * internal metadata allocation.
 *
 * There are several cases where it will return true even if the charging was
 * not done. More specifically:
 *
 * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
 * 2. Already charged slab objects.
 * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
 *    without __GFP_ACCOUNT
 * 4. Allocating internal metadata has failed
 *
 * Return: true if charge was successful otherwise false.
 */
bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);

kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset, unsigned int usersize,
				  void (*ctor)(void *));

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);

int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
#define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
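
/*
 * Bulk API usage sketch (illustrative; foo_cache is a made-up cache).
 * kmem_cache_alloc_bulk() returns the number of objects allocated,
 * which is 0 on failure:
 *
 *	void *objs[16];
 *	int n = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL,
 *				      ARRAY_SIZE(objs), objs);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	// ... use objs[0..n-1] ...
 *	kmem_cache_free_bulk(foo_cache, n, objs);
 */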

static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
				   int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))

/*
 * These macros allow declaring a kmem_buckets * parameter alongside size, which
 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
 * sites don't have to pass NULL.
 */
#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
#define PASS_BUCKET_PARAM(_b)		(_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size)
#define PASS_BUCKET_PARAM(_b)		NULL
#endif

/*
 * The following functions are not to be used directly and are intended only
 * for internal use from kmalloc() and kmalloc_node(),
 * with the exception of kunit tests.
 */

void *__kmalloc_noprof(size_t size, gfp_t flags)
				__assume_kmalloc_alignment __malloc;

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
				__assume_kmalloc_alignment __malloc;

void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
				__assume_kmalloc_alignment __malloc;

void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
				  int node, size_t size)
				__assume_kmalloc_alignment __malloc;

void *__kmalloc_large_noprof(size_t size, gfp_t flags)
				__assume_page_alignment __malloc;

void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
				__assume_page_alignment __malloc;

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least to the size. For other sizes, the alignment is guaranteed to
 * be at least the largest power-of-two divisor of @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_noprof(size, flags);

		index = kmalloc_index(size);
		return __kmalloc_cache_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc_noprof(size, flags);
}
#define kmalloc(...)			alloc_hooks(kmalloc_noprof(__VA_ARGS__))

#define kmem_buckets_alloc(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

#define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))

static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_node_noprof(size, flags, node);

		index = kmalloc_index(size);
		return __kmalloc_cache_node_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
#define kmalloc_node(...)		alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
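
/*
 * The common kmalloc()/kfree() pattern, for reference (illustrative;
 * struct bar and nid are made-up names):
 *
 *	struct bar *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	if (!b)
 *		return -ENOMEM;
 *	// ... use b ...
 *	kfree(b);
 *
 * On a NUMA-aware path, kmalloc_node(sizeof(*b), GFP_KERNEL, nid)
 * requests placement on node nid instead.
 */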

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_noprof(bytes, flags);
	return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...)		alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
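
/*
 * Why kmalloc_array() rather than kmalloc(n * size, ...): the
 * check_mul_overflow() above turns a multiplication overflow into a
 * NULL return instead of a silently truncated allocation. Sketch
 * (illustrative names):
 *
 *	struct elem *tbl = kmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;	// covers the nr * sizeof(*tbl) overflow case
 */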

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * See krealloc_noprof() for further details.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
								       size_t new_n,
								       size_t new_size,
								       gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc_noprof(p, bytes, flags);
}
#define krealloc_array(...)		alloc_hooks(krealloc_array_noprof(__VA_ARGS__))

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
#define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)

void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
#define kmalloc_node_track_caller(...)	\
	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(...)	kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)

#define kmalloc_track_caller_noprof(...)	\
		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node_noprof(bytes, flags, node);
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
#define kmalloc_array_node(...)		alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))

#define kcalloc_node(_n, _size, _flags, _node)	\
	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)

/*
 * Shortcuts
 */
#define kmem_cache_zalloc(_k, _flags)	kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
	return kmalloc_noprof(size, flags | __GFP_ZERO);
}
#define kzalloc(...)			alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_noprof(size, flags, node)	\
	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
#define kvmalloc_node(...)		alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))

#define kvmalloc(_size, _flags)		kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvmalloc_noprof(_size, _flags)	kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
#define kvzalloc(_size, _flags)		kvmalloc(_size, (_flags)|__GFP_ZERO)

#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
#define kmem_buckets_valloc(_b, _size, _flags)	\
	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc_node_noprof(bytes, flags, node);
}

#define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
#define kvcalloc_node_noprof(_n, _s, _f, _node)	kvmalloc_array_node_noprof(_n, _s, (_f)|__GFP_ZERO, _node)
#define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)

#define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
#define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))

void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
		__realloc_size(2);
#define kvrealloc(...)				alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
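
/*
 * kvmalloc() usage sketch (illustrative): the allocation tries
 * kmalloc() first and may fall back to vmalloc(), so the buffer is not
 * guaranteed to be physically contiguous and must be freed with
 * kvfree():
 *
 *	void *buf = kvmalloc(size, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf ...
 *	kvfree(buf);
 */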

extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))

extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */