Linux/mm/percpu-internal.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/memcontrol.h>

/*
 * pcpu_block_md is the metadata block struct.
 * Each chunk's bitmap is split into a number of full blocks.
 * All units are in terms of bits.
 *
 * The scan hint is the largest known contiguous area before the contig hint.
 * It is not necessarily the actual largest contiguous area, though.  There is
 * an invariant that scan_hint_start > contig_hint_start iff
 * scan_hint == contig_hint.  This is necessary because when scanning forward,
 * we don't know if a new contig hint would be better than the current one.
 */
struct pcpu_block_md {
        int                     scan_hint;      /* scan hint for block */
        int                     scan_hint_start; /* block relative starting
                                                    position of the scan hint */
        int                     contig_hint;    /* contig hint for block */
        int                     contig_hint_start; /* block relative starting
                                                      position of the contig hint */
        int                     left_free;      /* size of free space along
                                                   the left side of the block */
        int                     right_free;     /* size of free space along
                                                   the right side of the block */
        int                     first_free;     /* block position of first free */
        int                     nr_bits;        /* total bits responsible for */
};

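/*
 * Worked example (illustrative, not part of the kernel source): assume a
 * 1024-bit block (4K page, 4-byte PCPU_MIN_ALLOC_SIZE) in which bits 0-9 are
 * free, 10-99 allocated, 100-499 free, and 500-1023 allocated.  Then:
 *
 *	nr_bits = 1024, first_free = 0, left_free = 10, right_free = 0,
 *	contig_hint = 400 at contig_hint_start = 100, and
 *	scan_hint = 10 at scan_hint_start = 0.
 *
 * The invariant above holds for this snapshot: scan_hint != contig_hint and
 * scan_hint_start < contig_hint_start.
 */
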
struct pcpuobj_ext {
#ifdef CONFIG_MEMCG
        struct obj_cgroup       *cgroup;
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
        union codetag_ref       tag;
#endif
};

#if defined(CONFIG_MEMCG) || defined(CONFIG_MEM_ALLOC_PROFILING)
#define NEED_PCPUOBJ_EXT
#endif

struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
        int                     nr_alloc;       /* # of allocations */
        size_t                  max_alloc_size; /* largest allocation size */
#endif

        struct list_head        list;           /* linked to pcpu_slot lists */
        int                     free_bytes;     /* free bytes in the chunk */
        struct pcpu_block_md    chunk_md;
        unsigned long           *bound_map;     /* boundary map */

        /*
         * base_addr is the base address of this chunk.
         * To reduce false sharing, the current layout is optimized so that
         * base_addr sits in a different cacheline from free_bytes and
         * chunk_md.
         */
        void                    *base_addr ____cacheline_aligned_in_smp;

        unsigned long           *alloc_map;     /* allocation map */
        struct pcpu_block_md    *md_blocks;     /* metadata blocks */

        void                    *data;          /* chunk data */
        bool                    immutable;      /* no [de]population allowed */
        bool                    isolated;       /* isolated from active chunk
                                                   slots */
        int                     start_offset;   /* the overlap with the previous
                                                   region to have a page aligned
                                                   base_addr */
        int                     end_offset;     /* additional area required to
                                                   have the region end page
                                                   aligned */
#ifdef NEED_PCPUOBJ_EXT
        struct pcpuobj_ext      *obj_exts;      /* vector of object extensions */
#endif

        int                     nr_pages;       /* # of pages served by this chunk */
        int                     nr_populated;   /* # of populated pages */
        int                     nr_empty_pop_pages; /* # of empty populated pages */
        unsigned long           populated[];    /* populated bitmap */
};

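/*
 * Sizing sketch (illustrative, not verbatim kernel code): populated[] is a
 * flexible array holding one bit per page the chunk serves, so an allocation
 * of the struct would reserve BITS_TO_LONGS(nr_pages) trailing longs, e.g.
 *
 *	size_t sz = struct_size(chunk, populated, BITS_TO_LONGS(nr_pages));
 */
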
static inline bool need_pcpuobj_ext(void)
{
        if (IS_ENABLED(CONFIG_MEM_ALLOC_PROFILING))
                return true;
        if (!mem_cgroup_kmem_disabled())
                return true;
        return false;
}

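/*
 * Illustrative use (a sketch; alloc_obj_exts() is a hypothetical helper, not
 * a kernel function): chunk creation can skip allocating chunk->obj_exts
 * entirely when neither allocation profiling nor memcg kmem accounting is
 * active:
 *
 *	if (need_pcpuobj_ext())
 *		chunk->obj_exts = alloc_obj_exts(pcpu_chunk_map_bits(chunk));
 */
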
extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_chunk_lists;
extern int pcpu_nr_slots;
extern int pcpu_sidelined_slot;
extern int pcpu_to_depopulate_slot;
extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;
/**
 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bitmap blocks used.
 */
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
        return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}

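/*
 * Example (illustrative, assuming the common configuration where
 * PCPU_BITMAP_BLOCK_SIZE == PAGE_SIZE): a chunk serving 8 pages of 4096
 * bytes has 8 * 4096 / 4096 = 8 metadata blocks, i.e. one md_block per page.
 */
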
/**
 * pcpu_nr_pages_to_map_bits - converts a page count to the size of the bitmap
 * @pages: number of physical pages
 *
 * This conversion is from physical pages to the number of bits
 * required in the bitmap.
 */
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
        return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}

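/*
 * Example (illustrative, assuming 4K pages): PCPU_MIN_ALLOC_SIZE is 4 bytes,
 * so each page contributes 4096 / 4 = 1024 bits and
 * pcpu_nr_pages_to_map_bits(2) returns 2048.
 */
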
/**
 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bits in the bitmap.
 */
static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
{
        return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
}

/**
 * pcpu_obj_full_size - helper to calculate size of each accounted object
 * @size: size of area to allocate in bytes
 *
 * For each accounted object there is extra space used to store its
 * obj_cgroup membership when kmemcg is enabled.  Charge that space too.
 */
static inline size_t pcpu_obj_full_size(size_t size)
{
        size_t extra_size = 0;

#ifdef CONFIG_MEMCG
        if (!mem_cgroup_kmem_disabled())
                extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
#endif

        return size * num_possible_cpus() + extra_size;
}

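/*
 * Worked example (illustrative, assuming 4 possible CPUs, 64-bit pointers,
 * and kmemcg enabled): for size = 64 bytes, extra_size = 64 / 4 * 8 = 128
 * bytes of obj_cgroup pointers, so pcpu_obj_full_size(64) returns
 * 64 * 4 + 128 = 384 bytes.
 */
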
#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

struct percpu_stats {
        u64 nr_alloc;           /* lifetime # of allocations */
        u64 nr_dealloc;         /* lifetime # of deallocations */
        u64 nr_cur_alloc;       /* current # of allocations */
        u64 nr_max_alloc;       /* max # of live allocations */
        u32 nr_chunks;          /* current # of live chunks */
        u32 nr_max_chunks;      /* max # of live chunks */
        size_t min_alloc_size;  /* min allocation size */
        size_t max_alloc_size;  /* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;

/*
 * For debug purposes. We don't care about the flexible array.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
        memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));

        /* initialize min_alloc_size to unit_size */
        pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}

/*
 * pcpu_stats_area_alloc - increment area allocation stats
 * @chunk: the location of the area being allocated
 * @size: size of area to allocate in bytes
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
        lockdep_assert_held(&pcpu_lock);

        pcpu_stats.nr_alloc++;
        pcpu_stats.nr_cur_alloc++;
        pcpu_stats.nr_max_alloc =
                max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
        pcpu_stats.min_alloc_size =
                min(pcpu_stats.min_alloc_size, size);
        pcpu_stats.max_alloc_size =
                max(pcpu_stats.max_alloc_size, size);

        chunk->nr_alloc++;
        chunk->max_alloc_size = max(chunk->max_alloc_size, size);
}

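/*
 * Illustrative call pattern (a sketch, not verbatim kernel code): the
 * lockdep assertion above means the caller must already hold pcpu_lock,
 * e.g.:
 *
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	... find and mark a free area in the chunk ...
 *	pcpu_stats_area_alloc(chunk, size);
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 */
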
/*
 * pcpu_stats_area_dealloc - decrement allocation stats
 * @chunk: the location of the area being deallocated
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
        lockdep_assert_held(&pcpu_lock);

        pcpu_stats.nr_dealloc++;
        pcpu_stats.nr_cur_alloc--;

        chunk->nr_alloc--;
}

/*
 * pcpu_stats_chunk_alloc - increment chunk stats
 */
static inline void pcpu_stats_chunk_alloc(void)
{
        unsigned long flags;
        spin_lock_irqsave(&pcpu_lock, flags);

        pcpu_stats.nr_chunks++;
        pcpu_stats.nr_max_chunks =
                max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);

        spin_unlock_irqrestore(&pcpu_lock, flags);
}

/*
 * pcpu_stats_chunk_dealloc - decrement chunk stats
 */
static inline void pcpu_stats_chunk_dealloc(void)
{
        unsigned long flags;
        spin_lock_irqsave(&pcpu_lock, flags);

        pcpu_stats.nr_chunks--;

        spin_unlock_irqrestore(&pcpu_lock, flags);
}

#else

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */

#endif /* _MM_PERCPU_INTERNAL_H */