TOMOYO Linux Cross Reference
Linux/mm/slab.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()      system_has_cmpxchg128()
# define try_cmpxchg_freelist           try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist   this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()      system_has_cmpxchg64()
# define try_cmpxchg_freelist           try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist   this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together; this avoids the typical
 * ABA problems with cmpxchg of just a pointer.
 */
typedef union {
        struct {
                void *freelist;
                unsigned long counter;
        };
        freelist_full_t full;
} freelist_aba_t;
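
/*
 * Illustrative sketch (not part of the upstream header): because the pointer
 * and counter share one double-word, both can be compared and swapped at
 * once.  A freelist that was popped and pushed back between the read and
 * the cmpxchg (the classic ABA pattern) is still caught, since the counter
 * has moved on.  Assuming a configuration where system_has_freelist_aba()
 * is true, the update is roughly:
 *
 *      freelist_aba_t old, new;
 *
 *      old.freelist = slab->freelist;
 *      old.counter  = slab->counters;
 *      new.freelist = next_object;
 *      new.counter  = old.counter + 1;
 *      if (!try_cmpxchg_freelist(&slab->freelist_counter.full,
 *                                &old.full, new.full))
 *              goto retry;     // someone else touched the freelist
 */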

/* Reuses the bits in struct page */
struct slab {
        unsigned long __page_flags;

        struct kmem_cache *slab_cache;
        union {
                struct {
                        union {
                                struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
                                struct {
                                        struct slab *next;
                                        int slabs;      /* Nr of slabs left */
                                };
#endif
                        };
                        /* Double-word boundary */
                        union {
                                struct {
                                        void *freelist;         /* first free object */
                                        union {
                                                unsigned long counters;
                                                struct {
                                                        unsigned inuse:16;
                                                        unsigned objects:15;
                                                        unsigned frozen:1;
                                                };
                                        };
                                };
#ifdef system_has_freelist_aba
                                freelist_aba_t freelist_counter;
#endif
                        };
                };
                struct rcu_head rcu_head;
        };

        unsigned int __page_type;
        atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
        unsigned long obj_exts;
#endif
};
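
/*
 * Illustrative sketch (not part of the upstream header): 'counters' overlays
 * the inuse/objects/frozen bitfields so that all three can be read or
 * cmpxchg'd as a single word.  On a typical little-endian build, a slab with
 * 32 objects, 5 of them in use, and not frozen would look like:
 *
 *      slab->inuse   = 5;
 *      slab->objects = 32;
 *      slab->frozen  = 0;
 *      // slab->counters == (0UL << 31) | (32UL << 16) | 5 == 0x200005
 */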

#define SLAB_MATCH(pg, sl)                                              \
        static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);  /* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)       (_Generic((folio),                      \
        const struct folio *:   (const struct slab *)(folio),           \
        struct folio *:         (struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects, and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to a
 * folio in order to communicate with the rest of the mm.  Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)           (_Generic((s),                          \
        const struct slab *:    (const struct folio *)s,                \
        struct slab *:          (struct folio *)s))
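
/*
 * Illustrative usage (not part of the upstream header): both conversions are
 * type-checked casts rather than pointer arithmetic, because struct slab and
 * struct folio describe the same underlying memory:
 *
 *      struct slab *slab = ...;
 *      struct folio *folio = slab_folio(slab);
 *
 *      VM_BUG_ON(!folio_test_slab(folio));
 *      VM_BUG_ON(folio_slab(folio) != slab);
 */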

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations where
 * we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)            (_Generic((p),                          \
        const struct page *:    (const struct slab *)(p),               \
        struct page *:          (struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
        return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
        folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
        folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
        __folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
        return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
        return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
        return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
        struct folio *folio = virt_to_folio(addr);

        if (!folio_test_slab(folio))
                return NULL;

        return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
        return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
        return PAGE_SIZE << slab_order(slab);
}
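
/*
 * Illustrative usage (not part of the upstream header): given an object
 * address, the helpers above recover the slab and its geometry.
 * virt_to_slab() returns NULL for memory that is not slab-backed, e.g. a
 * large kmalloc allocation served directly by the page allocator:
 *
 *      struct slab *slab = virt_to_slab(object);
 *
 *      if (slab) {
 *              void *start  = slab_address(slab);   // first byte of the slab
 *              size_t bytes = slab_size(slab);      // PAGE_SIZE << slab_order(slab)
 *              int node     = slab_nid(slab);       // NUMA node of the backing folio
 *      }
 */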

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)                  ((c)->partial)

#define slub_set_percpu_partial(c, p)           \
({                                              \
        slub_percpu_partial(c) = (p)->next;     \
})

#define slub_percpu_partial_read_once(c)        READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)                  NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)        NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
        unsigned int x;
};
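
/*
 * Illustrative sketch (not part of the upstream header; the accessors live
 * in mm/slub.c): order and object count are packed into the single word so
 * both can be read with one load, roughly as
 *
 *      oo.x    = (order << 16) | objects;      // oo_make()
 *      order   = oo.x >> 16;                   // oo_order()
 *      objects = oo.x & 0xffff;                // oo_objects()
 *
 * e.g. an order-3 slab holding 73 objects is stored as 0x30049.
 */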

/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
        struct kmem_cache_cpu __percpu *cpu_slab;
#endif
        /* Used for retrieving partial slabs, etc. */
        slab_flags_t flags;
        unsigned long min_partial;
        unsigned int size;              /* Object size including metadata */
        unsigned int object_size;       /* Object size without metadata */
        struct reciprocal_value reciprocal_size;
        unsigned int offset;            /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        /* Number of per cpu partial objects to keep around */
        unsigned int cpu_partial;
        /* Number of per cpu partial slabs to keep around */
        unsigned int cpu_partial_slabs;
#endif
        struct kmem_cache_order_objects oo;

        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects min;
        gfp_t allocflags;               /* gfp flags to use on each alloc */
        int refcount;                   /* Refcount for slab cache destroy */
        void (*ctor)(void *object);     /* Object constructor */
        unsigned int inuse;             /* Offset to metadata */
        unsigned int align;             /* Alignment */
        unsigned int red_left_pad;      /* Left redzone padding size */
        const char *name;               /* Name (only for display!) */
        struct list_head list;          /* List of slab caches */
#ifdef CONFIG_SYSFS
        struct kobject kobj;            /* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
        unsigned long random;
#endif

#ifdef CONFIG_NUMA
        /*
         * Defragmentation by allocating from a remote node.
         */
        unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
        struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
        unsigned int useroffset;        /* Usercopy region offset */
        unsigned int usersize;          /* Usercopy region size */
#endif

        struct kmem_cache_node *node[MAX_NUMNODES];
};
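
/*
 * Illustrative sketch (not part of the upstream header): the sizing fields
 * above describe the per-object layout.  For a cache created without any
 * debug flags the picture is roughly
 *
 *      |<--------------------- size (stride) --------------------->|
 *      |<----- object_size ----->|<-- alignment/metadata padding -->|
 *
 * 'offset' is where a free object stores its freelist pointer, 'inuse' is
 * the offset at which metadata may begin, and with SLAB_RED_ZONE an extra
 * 'red_left_pad' bytes precede the payload.  'oo' holds the preferred slab
 * order together with the number of objects such a slab contains.
 */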

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
                                const struct slab *slab, void *x)
{
        void *object = x - (x - slab_address(slab)) % cache->size;
        void *last_object = slab_address(slab) +
                (slab->objects - 1) * cache->size;
        void *result = (unlikely(object > last_object)) ? last_object : object;

        result = fixup_red_left(cache, result);
        return result;
}
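
/*
 * Illustrative example (not part of the upstream header): with cache->size
 * == 256 and a slab starting at 'base', a pointer into the middle of the
 * third object is rounded down to that object's start:
 *
 *      x      = base + 2 * 256 + 100;
 *      object = x - (x - base) % 256;          // == base + 512
 *
 * Pointers beyond the last object are clamped to the last object, and
 * fixup_red_left() skips the left red zone when SLAB_RED_ZONE is active.
 */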

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
                                          void *addr, void *obj)
{
        return reciprocal_divide(kasan_reset_tag(obj) - addr,
                                 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                                        const struct slab *slab, void *obj)
{
        if (is_kfence_address(obj))
                return 0;
        return __obj_to_index(cache, slab_address(slab), obj);
}
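
/*
 * Illustrative example (not part of the upstream header): reciprocal_divide()
 * turns the division by the constant cache->size into a multiply and shift,
 * so the result is simply the object's ordinal number within the slab:
 *
 *      // equivalent to ((char *)obj - (char *)slab_address(slab)) / cache->size
 *      unsigned int idx = obj_to_index(cache, slab, obj);
 *
 * KFENCE objects report index 0 because they do not live inside the slab's
 * object array.
 */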

static inline int objs_per_slab(const struct kmem_cache *cache,
                                const struct slab *slab)
{
        return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
        return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
        unsigned int index;

        if (!b)
                b = &kmalloc_caches[kmalloc_type(flags, caller)];
        if (size <= 192)
                index = kmalloc_size_index[size_index_elem(size)];
        else
                index = fls(size - 1);

        return (*b)[index];
}
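
/*
 * Illustrative example (not part of the upstream header): small sizes are
 * mapped through kmalloc_size_index[], larger ones through fls().  Assuming
 * the default kmalloc cache geometry:
 *
 *      kmalloc_slab(100, NULL, GFP_KERNEL, _RET_IP_);
 *              // size_index_elem(100) == 12, table lookup -> kmalloc-128
 *      kmalloc_slab(1000, NULL, GFP_KERNEL, _RET_IP_);
 *              // fls(999) == 10 -> kmalloc-1024
 */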

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
        return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT | \
                          SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT | \
                              SLAB_KMALLOC | \
                              SLAB_NO_MERGE | \
                              SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
        return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
        return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
        if (IS_ENABLED(CONFIG_SLUB_DEBUG))
                VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (__slub_debug_enabled())
                return s->flags & flags;
        return false;
}
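
/*
 * Illustrative usage (not part of the upstream header):
 *
 *      if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *              ;       // alloc/free tracking is recorded for this cache
 *
 * When slab_debug is not enabled at all, the static key keeps this check
 * down to a single patched-out branch.
 */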

#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
        unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
        VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
                                                        slab_page(slab));
        VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif
        return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                        gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
        return NULL;
}

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
                                  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                            void **p, int objects, struct slabobj_ext *obj_exts);
#endif

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
#endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
}
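
/*
 * Illustrative example (not part of the upstream header): for a cache with
 * object_size == 100 and size == 104:
 *
 *      slab_ksize(s);  // no debug/RCU flags: s->size (104), whole stride usable
 *      slab_ksize(s);  // SLAB_STORE_USER or SLAB_TYPESAFE_BY_RCU: s->inuse
 *      slab_ksize(s);  // SLAB_RED_ZONE/SLAB_POISON or SLAB_KASAN: s->object_size (100)
 *
 * This is the value that ksize() ultimately reports for a slab-backed object.
 */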

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}
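
/*
 * Illustrative example (not part of the upstream header): with init_on_alloc
 * enabled, a plain cache is zeroed on every allocation, a cache with a
 * constructor is never scribbled over, and RCU/poisoned caches only honour
 * an explicit __GFP_ZERO:
 *
 *      slab_want_init_on_alloc(GFP_KERNEL, c);
 *              // no ctor, no special flags            -> true
 *              // c->ctor != NULL                      -> false
 *              // SLAB_TYPESAFE_BY_RCU or SLAB_POISON  -> only with __GFP_ZERO
 *
 * slab_want_init_on_free() applies the same constructor/RCU/poison
 * exclusions when init_on_free is enabled.
 */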

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
        void *kp_ptr;
        struct slab *kp_slab;
        void *kp_objp;
        unsigned long kp_data_offset;
        struct kmem_cache *kp_slab_cache;
        void *kp_ret;
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
                         const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */
