/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>

enum _slab_flag_bits {
	_SLAB_CONSISTENCY_CHECKS,
	_SLAB_RED_ZONE,
	_SLAB_POISON,
	_SLAB_KMALLOC,
	_SLAB_HWCACHE_ALIGN,
	_SLAB_CACHE_DMA,
	_SLAB_CACHE_DMA32,
	_SLAB_STORE_USER,
	_SLAB_PANIC,
	_SLAB_TYPESAFE_BY_RCU,
	_SLAB_TRACE,
#ifdef CONFIG_DEBUG_OBJECTS
	_SLAB_DEBUG_OBJECTS,
#endif
	_SLAB_NOLEAKTRACE,
	_SLAB_NO_MERGE,
#ifdef CONFIG_FAILSLAB
	_SLAB_FAILSLAB,
#endif
#ifdef CONFIG_MEMCG
	_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
	_SLAB_KASAN,
#endif
	_SLAB_NO_USER_FLAGS,
#ifdef CONFIG_KFENCE
	_SLAB_SKIP_KFENCE,
#endif
#ifndef CONFIG_SLUB_TINY
	_SLAB_RECLAIM_ACCOUNT,
#endif
	_SLAB_OBJECT_POISON,
	_SLAB_CMPXCHG_DOUBLE,
#ifdef CONFIG_SLAB_OBJ_EXT
	_SLAB_NO_OBJ_EXT,
#endif
	_SLAB_FLAGS_LAST_BIT
};

#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * begin:
 *  rcu_read_lock();
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) { // might fail for free objects
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above.  The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor().  Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
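/*
 * Illustrative sketch of the ctor approach described above (the struct and
 * function names are made up, not part of this header): initialize the lock
 * once per slab page, in the spirit of sighand_ctor(), so readers may take
 * it under rcu_read_lock() without re-initialization on every allocation.
 *
 *	struct my_obj {
 *		spinlock_t	lock;
 *		unsigned long	key;
 *	};
 *
 *	static void my_obj_ctor(void *addr)
 *	{
 *		struct my_obj *obj = addr;
 *
 *		spin_lock_init(&obj->lock);
 *	}
 *
 * The cache would then be created with SLAB_TYPESAFE_BY_RCU and this ctor,
 * and my_obj_ctor() is deliberately *not* re-run when an object is recycled.
 */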
/* Trace allocations and frees */
#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)

/*
 * Prevent merging with compatible kmem caches. This should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance critical caches, should be very rare and consulted with slab
 *   maintainers, and not used together with CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
#else
#define SLAB_KASAN		__SLAB_FLAG_UNUSED
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
#else
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab created using create_boot_cache */
#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
#else
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
#endif

/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);

/**
 * struct kmem_cache_args - Less common arguments for kmem_cache_create()
 *
 * Any uninitialized fields of the structure are interpreted as unused. The
 * exception is @freeptr_offset where %0 is a valid value, so
 * @use_freeptr_offset must be also set to %true in order to interpret the field
 * as used. For @useroffset %0 is also valid, but only with non-%0
 * @usersize.
 *
 * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
 * fields being unused.
 */
struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset, when @usersize is non-%0
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free objects in the
	 * same state as after calling the constructor, or deal appropriately
	 * with any differences between a freshly constructed and a reused
	 * object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
};

struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags);
static inline struct kmem_cache *
__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		    slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align	= align,
		.ctor	= ctor,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/**
 * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
 * for copying to userspace.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects, or %NULL.
 *
 * This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY()
 * if whitelisting a single field is sufficient, or kmem_cache_create() with
 * the necessary parameters passed via the args parameter (see
 * &struct kmem_cache_args)
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
static inline struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
			   unsigned int align, slab_flags_t flags,
			   unsigned int useroffset, unsigned int usersize,
			   void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align		= align,
		.ctor		= ctor,
		.useroffset	= useroffset,
		.usersize	= usersize,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/* If NULL is passed for @args, use this variant with default arguments. */
static inline struct kmem_cache *
__kmem_cache_default_args(const char *name, unsigned int size,
			  struct kmem_cache_args *args,
			  slab_flags_t flags)
{
	struct kmem_cache_args kmem_default_args = {};

	/* Make sure we don't get passed garbage. */
	if (WARN_ON_ONCE(args))
		return ERR_PTR(-EINVAL);

	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
}

/**
 * kmem_cache_create - Create a kmem cache.
 * @__name: A string which is used in /proc/slabinfo to identify this cache.
 * @__object_size: The size of objects to be created in this cache.
 * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
 *	    means defaults will be used for all the arguments.
 *
 * This is currently implemented as a macro using ``_Generic()`` to call
 * either the new variant of the function, or a legacy one.
 *
 * The new variant has 4 parameters:
 * ``kmem_cache_create(name, object_size, args, flags)``
 *
 * See __kmem_cache_create_args() which implements this.
 *
 * The legacy variant has 5 parameters:
 * ``kmem_cache_create(name, object_size, align, flags, ctor)``
 *
 * The align and ctor parameters map to the respective fields of
 * &struct kmem_cache_args
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
#define kmem_cache_create(__name, __object_size, __args, ...)		\
	_Generic((__args),						\
		struct kmem_cache_args *: __kmem_cache_create_args,	\
		void *: __kmem_cache_default_args,			\
		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
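/*
 * Usage sketch for the macro above (illustrative only; "struct my_obj" and
 * "my_obj_ctor" are made-up names). Both calling conventions accepted by
 * kmem_cache_create() are shown:
 *
 *	// New variant, optional arguments bundled in kmem_cache_args:
 *	struct kmem_cache_args args = {
 *		.align	= __alignof__(struct my_obj),
 *		.ctor	= my_obj_ctor,
 *	};
 *	cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *				  &args, SLAB_HWCACHE_ALIGN);
 *
 *	// Legacy 5-parameter variant, mapped to the fields above:
 *	cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *				  __alignof__(struct my_obj),
 *				  SLAB_HWCACHE_ALIGN, my_obj_ctor);
 */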
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
			&(struct kmem_cache_args) {			\
				.align	= __alignof__(struct __struct), \
			}, (__flags))

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
			&(struct kmem_cache_args) {			\
				.align		= __alignof__(struct __struct), \
				.useroffset	= offsetof(struct __struct, __field), \
				.usersize	= sizeof_field(struct __struct, __field), \
			}, (__flags))
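/*
 * Illustrative use of the two macros above (type and field names are made
 * up): the cache name and object layout are derived from the struct itself,
 * and KMEM_CACHE_USERCOPY() additionally whitelists one field for the
 * hardened usercopy checks on copy_to_user()/copy_from_user():
 *
 *	cache = KMEM_CACHE(my_obj, SLAB_ACCOUNT);
 *	cache = KMEM_CACHE_USERCOPY(my_req, 0, payload);
 */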
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc_noprof(const void *objp, size_t new_size,
				    gfp_t flags) __realloc_size(2);
#define krealloc(...)				alloc_hooks(krealloc_noprof(__VA_ARGS__))
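/*
 * Usage sketch (illustrative): growing a buffer with krealloc(). On failure
 * krealloc() returns NULL and leaves the original allocation untouched, so
 * assign the result to a temporary to avoid leaking the old buffer:
 *
 *	new = krealloc(buf, new_size, GFP_KERNEL);
 *	if (!new) {
 *		kfree(buf);
 *		return -ENOMEM;
 *	}
 *	buf = new;
 */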
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);

#ifdef CONFIG_PRINTK
bool kmem_dump_obj(void *object);
#else
static inline bool kmem_dump_obj(void *object) { return false; }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
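/*
 * Worked example for the sizes above (illustrative): with the common 4K
 * page size, PAGE_SHIFT is 12, so KMALLOC_SHIFT_HIGH is 13 and
 * KMALLOC_MAX_CACHE_SIZE is 8K. kmalloc() requests larger than that bypass
 * the slab caches and go straight to the page allocator.
 */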
/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR	0
#endif

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];

extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))

extern unsigned long random_kmalloc_seed;

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
		return KMALLOC_NORMAL;
#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
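/*
 * Illustrative mapping produced by kmalloc_type() above (assuming
 * CONFIG_ZONE_DMA and CONFIG_MEMCG are enabled and
 * CONFIG_RANDOM_KMALLOC_CACHES is not):
 *
 *	GFP_KERNEL				-> KMALLOC_NORMAL
 *	GFP_KERNEL | __GFP_DMA			-> KMALLOC_DMA
 *	GFP_KERNEL | __GFP_RECLAIMABLE		-> KMALLOC_RECLAIM
 *	GFP_KERNEL | __GFP_ACCOUNT		-> KMALLOC_CGROUP
 *
 * __GFP_DMA takes priority over __GFP_RECLAIMABLE, which takes priority
 * over __GFP_ACCOUNT, matching the comment in the function body.
 */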
/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
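/*
 * Examples of the mapping above (illustrative, for KMALLOC_MIN_SIZE == 8,
 * i.e. KMALLOC_SHIFT_LOW == 3):
 *
 *	kmalloc_index(8)   == 3		(the 8 byte cache)
 *	kmalloc_index(96)  == 1		(the intermediate 96 byte cache)
 *	kmalloc_index(100) == 7		(rounded up to the 128 byte cache)
 */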
#include <linux/alloc_tag.h>

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
			      gfp_t flags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc(...)			alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))

void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
			    gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))

/**
 * kmem_cache_charge - memcg charge an already allocated slab memory
 * @objp: address of the slab object to memcg charge
 * @gfpflags: describe the allocation context
 *
 * kmem_cache_charge allows charging a slab object to the current memcg,
 * primarily in cases where charging at allocation time might not be possible
 * because the target memcg is not known (i.e. softirq context)
 *
 * The objp should be pointer returned by the slab allocator functions like
 * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
 * behavior can be controlled through gfpflags parameter, which affects how the
 * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
 * that overcharging is requested instead of failure, but is not applied for the
 * internal metadata allocation.
 *
 * There are several cases where it will return true even if the charging was
 * not done. More specifically:
 *
 * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
 * 2. Already charged slab objects.
 * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
 *    without __GFP_ACCOUNT
 * 4. Allocating internal metadata has failed
 *
 * Return: true if charge was successful otherwise false.
 */
bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);

kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset, unsigned int usersize,
				  void (*ctor)(void *));

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);

int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
#define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))

static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
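/*
 * Usage sketch of the bulk API above (illustrative; "cache" and "objs" are
 * caller-provided): allocate and free a batch of objects with one call each.
 * kmem_cache_alloc_bulk() returns the number of objects allocated, or 0 on
 * failure:
 *
 *	void *objs[16];
 *	int allocated;
 *
 *	allocated = kmem_cache_alloc_bulk(cache, GFP_KERNEL, 16, objs);
 *	if (!allocated)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(cache, allocated, objs);
 */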
void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
				   int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))

/*
 * These macros allow declaring a kmem_buckets * parameter alongside size, which
 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
 * sites don't have to pass NULL.
 */
#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
#define PASS_BUCKET_PARAM(_b)		(_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size)
#define PASS_BUCKET_PARAM(_b)		NULL
#endif

/*
 * The following functions are not to be used directly and are intended only
 * for internal use from kmalloc() and kmalloc_node(),
 * with the exception of kunit tests.
 */

void *__kmalloc_noprof(size_t size, gfp_t flags)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
				__assume_kmalloc_alignment __alloc_size(3);

void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
				  int node, size_t size)
				__assume_kmalloc_alignment __alloc_size(4);

void *__kmalloc_large_noprof(size_t size, gfp_t flags)
				__assume_page_alignment __alloc_size(1);

void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
				__assume_page_alignment __alloc_size(1);

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least to the size. For other sizes, the alignment is guaranteed to
 * be at least the largest power-of-two divisor of @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_noprof(size, flags);

		index = kmalloc_index(size);
		return __kmalloc_cache_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc_noprof(size, flags);
}
#define kmalloc(...)				alloc_hooks(kmalloc_noprof(__VA_ARGS__))
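/*
 * Usage sketch (illustrative): the common call patterns built from the
 * flags documented above.
 *
 *	buf = kmalloc(len, GFP_KERNEL);	// normal allocation, may sleep
 *	buf = kmalloc(len, GFP_ATOMIC);	// atomic context, will not sleep
 *	buf = kzalloc(len, GFP_KERNEL);	// zeroed, see kzalloc() below
 *	if (!buf)
 *		return -ENOMEM;
 */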
#define kmem_buckets_alloc(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

#define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, \
						       NUMA_NO_NODE, _RET_IP_))

static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_node_noprof(size, flags, node);

		index = kmalloc_index(size);
		return __kmalloc_cache_node_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
#define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_noprof(bytes, flags);
	return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...)			alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * See krealloc_noprof() for further details.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
								       size_t new_n,
								       size_t new_size,
								       gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc_noprof(p, bytes, flags);
}
#define krealloc_array(...)			alloc_hooks(krealloc_array_noprof(__VA_ARGS__))

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
#define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)

void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
#define kmalloc_node_track_caller(...)		\
	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(...)		kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)

#define kmalloc_track_caller_noprof(...)	\
		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node_noprof(bytes, flags, node);
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
#define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))

#define kcalloc_node(_n, _size, _flags, _node)	\
	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
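/*
 * Usage sketch (illustrative): array allocations with overflow-checked size
 * computation, preferred over open-coding n * sizeof(*ptrs):
 *
 *	ptrs = kmalloc_array(n, sizeof(*ptrs), GFP_KERNEL);
 *	ptrs = kcalloc(n, sizeof(*ptrs), GFP_KERNEL);	// zeroed variant
 */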
/*
 * Shortcuts
 */
#define kmem_cache_zalloc(_k, _flags)		kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
	return kmalloc_noprof(size, flags | __GFP_ZERO);
}
#define kzalloc(...)				alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_noprof(size, flags, node)	\
	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
#define kvmalloc_node(...)			alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))

#define kvmalloc(_size, _flags)			kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvmalloc_noprof(_size, _flags)		kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
#define kvzalloc(_size, _flags)			kvmalloc(_size, (_flags)|__GFP_ZERO)

#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
#define kmem_buckets_valloc(_b, _size, _flags)	\
	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc_node_noprof(bytes, flags, node);
}

#define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
#define kvcalloc_node_noprof(_n, _s, _f, _node)	kvmalloc_array_node_noprof(_n, _s, (_f)|__GFP_ZERO, _node)
#define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)

#define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
#define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))

void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
		__realloc_size(2);
#define kvrealloc(...)				alloc_hooks(kvrealloc_noprof(__VA_ARGS__))

extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))

extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */