/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>

enum _slab_flag_bits {
	_SLAB_CONSISTENCY_CHECKS,
	_SLAB_RED_ZONE,
	_SLAB_POISON,
	_SLAB_KMALLOC,
	_SLAB_HWCACHE_ALIGN,
	_SLAB_CACHE_DMA,
	_SLAB_CACHE_DMA32,
	_SLAB_STORE_USER,
	_SLAB_PANIC,
	_SLAB_TYPESAFE_BY_RCU,
	_SLAB_TRACE,
#ifdef CONFIG_DEBUG_OBJECTS
	_SLAB_DEBUG_OBJECTS,
#endif
	_SLAB_NOLEAKTRACE,
	_SLAB_NO_MERGE,
#ifdef CONFIG_FAILSLAB
	_SLAB_FAILSLAB,
#endif
#ifdef CONFIG_MEMCG
	_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
	_SLAB_KASAN,
#endif
	_SLAB_NO_USER_FLAGS,
#ifdef CONFIG_KFENCE
	_SLAB_SKIP_KFENCE,
#endif
#ifndef CONFIG_SLUB_TINY
	_SLAB_RECLAIM_ACCOUNT,
#endif
	_SLAB_OBJECT_POISON,
	_SLAB_CMPXCHG_DOUBLE,
#ifdef CONFIG_SLAB_OBJ_EXT
	_SLAB_NO_OBJ_EXT,
#endif
	_SLAB_FLAGS_LAST_BIT
};

#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * begin:
 *  rcu_read_lock();
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) { // might fail for free objects
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
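
/*
 * Illustrative sketch (not part of the kernel API; "struct foo", foo_ctor()
 * and foo_cache are hypothetical names): one way to satisfy the locking rule
 * above is a constructor that initializes the lock once per slab page, so
 * readers may take it under rcu_read_lock() and then re-validate the object:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		unsigned long key;
 *	};
 *
 *	static void foo_ctor(void *addr)
 *	{
 *		struct foo *f = addr;
 *
 *		spin_lock_init(&f->lock);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      &(struct kmem_cache_args){
 *					      .ctor = foo_ctor,
 *				      }, SLAB_TYPESAFE_BY_RCU);
 *
 * The lock stays initialized while the slab page is reused for new objects,
 * which is the pattern sighand_ctor() and anon_vma_ctor() rely on.
 */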
/* Trace allocations and frees */
#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)

/*
 * Prevent merging with compatible kmem caches. This flag should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance critical caches, should be very rare and consulted with slab
 *   maintainers, and not used together with CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
#else
#define SLAB_KASAN		__SLAB_FLAG_UNUSED
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
#else
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
#endif
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab created using create_boot_cache */
#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
#else
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
#endif

/*
 * freeptr_t represents a SLUB freelist pointer, which might be encoded
 * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
 */
typedef struct { unsigned long v; } freeptr_t;

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);

/**
 * struct kmem_cache_args - Less common arguments for kmem_cache_create()
 *
 * Any uninitialized fields of the structure are interpreted as unused. The
 * exception is @freeptr_offset, where %0 is a valid value, so
 * @use_freeptr_offset must also be set to %true in order to interpret the
 * field as used. For @useroffset %0 is also valid, but only with non-%0
 * @usersize.
 *
 * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
 * fields unused.
 */
struct kmem_cache_args {
	/**
	 * @align: The required alignment for the objects.
	 *
	 * %0 means no specific alignment is requested.
	 */
	unsigned int align;
	/**
	 * @useroffset: Usercopy region offset.
	 *
	 * %0 is a valid offset, when @usersize is non-%0
	 */
	unsigned int useroffset;
	/**
	 * @usersize: Usercopy region size.
	 *
	 * %0 means no usercopy region is specified.
	 */
	unsigned int usersize;
	/**
	 * @freeptr_offset: Custom offset for the free pointer
	 * in &SLAB_TYPESAFE_BY_RCU caches
	 *
	 * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
	 * outside of the object. This might cause the object to grow in size.
	 * Cache creators that have a reason to avoid this can specify a custom
	 * free pointer offset in their struct where the free pointer will be
	 * placed.
	 *
	 * Note that placing the free pointer inside the object requires the
	 * caller to ensure that no fields are invalidated that are required to
	 * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
	 * details).
	 *
	 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
	 * is specified, %use_freeptr_offset must be set %true.
	 *
	 * Note that @ctor currently isn't supported with custom free pointers
	 * as a @ctor requires an external free pointer.
	 */
	unsigned int freeptr_offset;
	/**
	 * @use_freeptr_offset: Whether a @freeptr_offset is used.
	 */
	bool use_freeptr_offset;
	/**
	 * @ctor: A constructor for the objects.
	 *
	 * The constructor is invoked for each object in a newly allocated slab
	 * page. It is the cache user's responsibility to free the object in
	 * the same state as after calling the constructor, or deal
	 * appropriately with any differences between a freshly constructed
	 * and a reallocated object.
	 *
	 * %NULL means no constructor.
	 */
	void (*ctor)(void *);
};
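
/*
 * Example of filling in the args structure (an illustrative sketch;
 * "struct foo" and its "name" field are hypothetical):
 *
 *	struct kmem_cache_args args = {
 *		.align		= __alignof__(struct foo),
 *		.useroffset	= offsetof(struct foo, name),
 *		.usersize	= sizeof_field(struct foo, name),
 *	};
 *
 *	cache = kmem_cache_create("foo", sizeof(struct foo), &args,
 *				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
 *
 * All fields left zero-initialized are treated as unused, as described
 * above. kmem_cache_create() itself is declared further below.
 */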
struct kmem_cache *__kmem_cache_create_args(const char *name,
					    unsigned int object_size,
					    struct kmem_cache_args *args,
					    slab_flags_t flags);
static inline struct kmem_cache *
__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		    slab_flags_t flags, void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align	= align,
		.ctor	= ctor,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}

/**
 * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
 * for copying to userspace.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects, or %NULL.
 *
 * This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY()
 * if whitelisting a single field is sufficient, or kmem_cache_create() with
 * the necessary parameters passed via the args parameter (see
 * &struct kmem_cache_args)
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
static inline struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
			   unsigned int align, slab_flags_t flags,
			   unsigned int useroffset, unsigned int usersize,
			   void (*ctor)(void *))
{
	struct kmem_cache_args kmem_args = {
		.align		= align,
		.ctor		= ctor,
		.useroffset	= useroffset,
		.usersize	= usersize,
	};

	return __kmem_cache_create_args(name, size, &kmem_args, flags);
}
/* If NULL is passed for @args, use this variant with default arguments. */
static inline struct kmem_cache *
__kmem_cache_default_args(const char *name, unsigned int size,
			  struct kmem_cache_args *args,
			  slab_flags_t flags)
{
	struct kmem_cache_args kmem_default_args = {};

	/* Make sure we don't get passed garbage. */
	if (WARN_ON_ONCE(args))
		return ERR_PTR(-EINVAL);

	return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
}

/**
 * kmem_cache_create - Create a kmem cache.
 * @__name: A string which is used in /proc/slabinfo to identify this cache.
 * @__object_size: The size of objects to be created in this cache.
 * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
 *	    means defaults will be used for all the arguments.
 *
 * This is currently implemented as a macro using ``_Generic()`` to call
 * either the new variant of the function, or a legacy one.
 *
 * The new variant has 4 parameters:
 * ``kmem_cache_create(name, object_size, args, flags)``
 *
 * See __kmem_cache_create_args() which implements this.
 *
 * The legacy variant has 5 parameters:
 * ``kmem_cache_create(name, object_size, align, flags, ctor)``
 *
 * The align and ctor parameters map to the respective fields of
 * &struct kmem_cache_args
 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
#define kmem_cache_create(__name, __object_size, __args, ...)		\
	_Generic((__args),						\
		struct kmem_cache_args *: __kmem_cache_create_args,	\
		void *: __kmem_cache_default_args,			\
		default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)

void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
			&(struct kmem_cache_args) {			\
				.align	= __alignof__(struct __struct), \
			}, (__flags))

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
	__kmem_cache_create_args(#__struct, sizeof(struct __struct),	\
			&(struct kmem_cache_args) {			\
				.align		= __alignof__(struct __struct), \
				.useroffset	= offsetof(struct __struct, __field), \
				.usersize	= sizeof_field(struct __struct, __field), \
			}, (__flags))
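
/*
 * Usage sketch for the two convenience macros above ("struct my_obj" and
 * my_obj_cache are hypothetical names):
 *
 *	struct my_obj {
 *		u64 id;
 *		char tag[16];
 *	} ____cacheline_aligned_in_smp;
 *
 *	// cache named "my_obj"; alignment is taken from the struct itself
 *	my_obj_cache = KMEM_CACHE(my_obj, SLAB_PANIC | SLAB_ACCOUNT);
 *
 *	// same, but whitelists my_obj.tag for copy_to_user()/copy_from_user()
 *	my_obj_cache = KMEM_CACHE_USERCOPY(my_obj, SLAB_PANIC, tag);
 */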
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc_noprof(const void *objp, size_t new_size,
				    gfp_t flags) __realloc_size(2);
#define krealloc(...)			alloc_hooks(krealloc_noprof(__VA_ARGS__))

void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);

#ifdef CONFIG_PRINTK
bool kmem_dump_obj(void *object);
#else
static inline bool kmem_dump_obj(void *object) { return false; }
#endif
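
/*
 * Sketch of the scoped-cleanup helper declared above via DEFINE_FREE()
 * (parse_blob() is a hypothetical caller): a pointer annotated with
 * __free(kfree) is passed to kfree() automatically when it goes out of
 * scope, unless ownership is handed off with no_free_ptr() or return_ptr().
 *
 *	int parse_blob(const u8 *src, size_t len)
 *	{
 *		u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
 *
 *		if (!buf)
 *			return -ENOMEM;
 *		memcpy(buf, src, len);
 *		...
 *		return 0;	// buf is kfree()d here automatically
 *	}
 */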
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * byte sized index which can represent 2^8 entries, the size of the object
 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
 * If minimum size of kmalloc is less than 16, we use it as minimum object
 * size and give up to use byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR	0
#endif

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];

extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))

extern unsigned long random_kmalloc_seed;
static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
		return KMALLOC_NORMAL;
#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
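
/*
 * For example, assuming CONFIG_ZONE_DMA and CONFIG_MEMCG are enabled and
 * CONFIG_RANDOM_KMALLOC_CACHES is not, the mapping above yields:
 *
 *	kmalloc_type(GFP_KERNEL, _RET_IP_)			-> KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE, _RET_IP_)	-> KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT, _RET_IP_)	-> KMALLOC_CGROUP
 *	kmalloc_type(GFP_DMA | __GFP_ACCOUNT, _RET_IP_)		-> KMALLOC_DMA
 */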
/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated.
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
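
/*
 * Worked example: there is no dedicated 100-byte cache, so
 * kmalloc_index(100) returns 7 and a 100-byte request is served from the
 * 128-byte (2^7) kmalloc cache; kmalloc_size_roundup(100), declared near
 * the end of this header, reports the same 128 bytes. Requests of
 * 65..96 bytes instead map to the special 96-byte cache (index 1) when
 * KMALLOC_MIN_SIZE permits it.
 */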

#include <linux/alloc_tag.h>

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of kmem_cache_alloc() with __GFP_ZERO.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
			      gfp_t flags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc(...)		alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))

void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
			    gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))

/**
 * kmem_cache_charge - memcg charge an already allocated slab memory
 * @objp: address of the slab object to memcg charge
 * @gfpflags: describe the allocation context
 *
 * kmem_cache_charge allows charging a slab object to the current memcg,
 * primarily in cases where charging at allocation time might not be possible
 * because the target memcg is not known (i.e. softirq context)
 *
 * The objp should be pointer returned by the slab allocator functions like
 * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
 * behavior can be controlled through gfpflags parameter, which affects how the
 * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
 * that overcharging is requested instead of failure, but is not applied for the
 * internal metadata allocation.
 *
 * There are several cases where it will return true even if the charging was
 * not done:
 * More specifically:
 *
 * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
 * 2. Already charged slab objects.
 * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
 *    without __GFP_ACCOUNT
 * 4. Allocating internal metadata has failed
 *
 * Return: true if charge was successful otherwise false.
 */
bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);

kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset, unsigned int usersize,
				  void (*ctor)(void *));

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);

int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
#define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
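
/*
 * Bulk usage sketch (my_cache and the error handling are hypothetical);
 * note that kmem_cache_alloc_bulk() returns the number of objects
 * allocated, or 0 on failure:
 *
 *	void *objs[16];
 *	int n;
 *
 *	n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!n)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cache, n, objs);
 */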
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
				   int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))

/*
 * These macros allow declaring a kmem_buckets * parameter alongside size, which
 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
 * sites don't have to pass NULL.
 */
#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
#define PASS_BUCKET_PARAM(_b)		(_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size)
#define PASS_BUCKET_PARAM(_b)		NULL
#endif

/*
 * The following functions are not to be used directly and are intended only
 * for internal use from kmalloc() and kmalloc_node(),
 * with the exception of kunit tests.
 */

void *__kmalloc_noprof(size_t size, gfp_t flags)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
				__assume_kmalloc_alignment __alloc_size(3);

void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
				  int node, size_t size)
				__assume_kmalloc_alignment __alloc_size(4);

void *__kmalloc_large_noprof(size_t size, gfp_t flags)
				__assume_page_alignment __alloc_size(1);

void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
				__assume_page_alignment __alloc_size(1);
/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least to the size. For other sizes, the alignment is guaranteed to
 * be at least the largest power-of-two divisor of @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_noprof(size, flags);

		index = kmalloc_index(size);
		return __kmalloc_cache_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc_noprof(size, flags);
}
#define kmalloc(...)			alloc_hooks(kmalloc_noprof(__VA_ARGS__))
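
/*
 * Typical usage (illustrative; "struct foo" is hypothetical):
 *
 *	struct foo *f;
 *
 *	f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 *
 * Passing sizeof(*f) rather than sizeof(struct foo) keeps the size correct
 * even if the type of f later changes.
 */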

#define kmem_buckets_alloc(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

#define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))

static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_node_noprof(size, flags, node);

		index = kmalloc_index(size);
		return __kmalloc_cache_node_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
#define kmalloc_node(...)		alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_noprof(bytes, flags);
	return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...)		alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with
 * the initial memory allocation, every subsequent call to this API for the
 * same memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible
 * that __GFP_ZERO is not fully honored by this API.
 *
 * See krealloc_noprof() for further details.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
								       size_t new_n,
								       size_t new_size,
								       gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc_noprof(p, bytes, flags);
}
#define krealloc_array(...)		alloc_hooks(krealloc_array_noprof(__VA_ARGS__))

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
#define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)
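
/*
 * Sketch (entries/nr are hypothetical): the array variants above reject
 * multiplication overflow instead of silently allocating a short buffer,
 * and krealloc_array() leaves the old buffer untouched on failure:
 *
 *	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);	// zeroed
 *	...
 *	new = krealloc_array(entries, new_nr, sizeof(*entries), GFP_KERNEL);
 *	if (!new)
 *		kfree(entries);
 *	else
 *		entries = new;
 */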

void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
#define kmalloc_node_track_caller(...)		\
	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(...)	kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)

#define kmalloc_track_caller_noprof(...)	\
		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size,
								 gfp_t flags, int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node_noprof(bytes, flags, node);
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
#define kmalloc_array_node(...)		alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))

#define kcalloc_node(_n, _size, _flags, _node)	\
	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)

/*
 * Shortcuts
 */
#define kmem_cache_zalloc(_k, _flags)	kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
	return kmalloc_noprof(size, flags | __GFP_ZERO);
}
#define kzalloc(...)			alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_noprof(size, flags, node)	\
	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
#define kvmalloc_node(...)		alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))

#define kvmalloc(_size, _flags)		kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvmalloc_noprof(_size, _flags)	kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
#define kvzalloc(_size, _flags)		kvmalloc(_size, (_flags)|__GFP_ZERO)

#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
#define kmem_buckets_valloc(_b, _size, _flags)	\
	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc_node_noprof(bytes, flags, node);
}

#define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
#define kvcalloc_node_noprof(_n,_s,_f,_node)	kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
#define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)

#define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
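
/*
 * Sketch (table/nr_entries are hypothetical): kvmalloc() and friends try a
 * kmalloc() first and fall back to vmalloc() for allocations that are too
 * large or too fragmented for the slab path, so the result must always be
 * released with kvfree() (declared below), never kfree():
 *
 *	table = kvcalloc(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */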
#define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))

void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
		__realloc_size(2);
#define kvrealloc(...)			alloc_hooks(kvrealloc_noprof(__VA_ARGS__))

extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))

extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */