TOMOYO Linux Cross Reference
Linux/include/linux/vmalloc.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>           /* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;          /* vma defining user mapping in mm_types.h */
struct notifier_block;          /* in notifier.h */
struct iov_iter;                /* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP              0x00000001      /* ioremap() and friends */
#define VM_ALLOC                0x00000002      /* vmalloc() */
#define VM_MAP                  0x00000004      /* vmap()ed pages */
#define VM_USERMAP              0x00000008      /* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT         0x00000010      /* dma_alloc_coherent */
#define VM_UNINITIALIZED        0x00000020      /* vm_struct is not fully initialized */
#define VM_NO_GUARD             0x00000040      /* ***DANGEROUS*** don't add guard page */
#define VM_KASAN                0x00000080      /* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS    0x00000100      /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES        0x00000200      /* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP      0x00000400      /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
        !defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK       0x00000800      /* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK       0
#endif
#define VM_SPARSE               0x00001000      /* sparse vm_area. not all pages are present. */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */
#endif

struct vm_struct {
        struct vm_struct        *next;
        void                    *addr;
        unsigned long           size;
        unsigned long           flags;
        struct page             **pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
        unsigned int            page_order;
#endif
        unsigned int            nr_pages;
        phys_addr_t             phys_addr;
        const void              *caller;
};
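
/*
 * Illustrative sketch (not part of the upstream header): the vm_struct
 * backing a vmalloc'ed buffer can be looked up and inspected with
 * find_vm_area(), which is declared further down in this file.
 *
 *      void *buf = vmalloc(64 * 1024);
 *      struct vm_struct *area;
 *
 *      if (buf) {
 *              area = find_vm_area(buf);
 *              pr_info("addr=%px size=%lu nr_pages=%u flags=%#lx\n",
 *                      area->addr, area->size, area->nr_pages, area->flags);
 *              vfree(buf);
 *      }
 */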

struct vmap_area {
        unsigned long va_start;
        unsigned long va_end;

        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */

        /*
         * The following two variables can be packed, because
         * a vmap_area object can be either:
         *    1) in "free" tree (root is free_vmap_area_root)
         *    2) or "busy" tree (root is vmap_area_root)
         */
        union {
                unsigned long subtree_max_size; /* in "free" tree */
                struct vm_struct *vm;           /* in "busy" tree */
        };
        unsigned long flags; /* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
        return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
                                                         u64 pfn, unsigned int max_page_shift)
{
        return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
        return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
        return prot;
}
#endif
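
/*
 * Illustrative sketch (not part of the upstream header): an architecture
 * that selects HAVE_ARCH_HUGE_VMAP overrides one of the defaults above by
 * defining both the macro and the function in its <asm/vmalloc.h>, e.g.:
 *
 *      #define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *      static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *      {
 *              return true;
 *      }
 *
 * Real architectures typically gate this on CPU features and on the
 * requested protection bits; the unconditional "return true" above is a
 * placeholder only.
 */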

/*
 *      Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
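
/*
 * Illustrative sketch (not part of the upstream header): vm_map_ram() and
 * vm_unmap_ram() provide a transient kernel mapping for an existing page
 * array. "pages" and "npages" are assumed to have been set up by the caller.
 *
 *      void *va = vm_map_ram(pages, npages, NUMA_NO_NODE);
 *
 *      if (va) {
 *              memset(va, 0, npages * PAGE_SIZE);
 *              vm_unmap_ram(va, npages);
 *      }
 */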

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...)            alloc_hooks(vmalloc_noprof(__VA_ARGS__))

extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...)            alloc_hooks(vzalloc_noprof(__VA_ARGS__))

extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...)       alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))

extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...)       alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))

extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...)       alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))

extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...)         alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))

extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...)    alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))

extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...)          alloc_hooks(__vmalloc_noprof(__VA_ARGS__))

extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
                        unsigned long start, unsigned long end, gfp_t gfp_mask,
                        pgprot_t prot, unsigned long vm_flags, int node,
                        const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...)       alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
                int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...)     alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))

void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...)       alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))

extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...)    alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))

extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...)      alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))

extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...)          alloc_hooks(__vcalloc_noprof(__VA_ARGS__))

extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...)            alloc_hooks(vcalloc_noprof(__VA_ARGS__))

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
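
/*
 * Illustrative sketch (not part of the upstream header): a typical
 * allocation pattern using the hooks above. vcalloc()/vmalloc_array() do
 * overflow-checked sizing, and vfree(NULL) is a no-op, which keeps error
 * paths simple. "struct foo" and "nr_entries" are placeholders.
 *
 *      struct foo *table = vcalloc(nr_entries, sizeof(*table));
 *
 *      if (!table)
 *              return -ENOMEM;
 *      ...
 *      vfree(table);
 */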

extern void *vmap(struct page **pages, unsigned int count,
                        unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
                                       unsigned long uaddr, void *kaddr,
                                       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                        unsigned long pgoff);
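
/*
 * Illustrative sketch (not part of the upstream header): exposing a
 * vmalloc_user() buffer (which marks the area VM_USERMAP) to user space
 * from a driver's ->mmap() handler. "foo_mmap" and "foo_buf" are
 * placeholders for the caller's own names.
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *      }
 */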

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
 * is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 *      Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
        if (!(area->flags & VM_NO_GUARD))
                /* return actual size without guard page */
                return area->size - PAGE_SIZE;
        else
                return area->size;
}
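
/*
 * Illustrative example: unless VM_NO_GUARD was used, area->size includes one
 * trailing guard page, so for
 *
 *      void *p = vmalloc(PAGE_SIZE);
 *      struct vm_struct *area = find_vm_area(p);
 *
 * area->size is 2 * PAGE_SIZE while get_vm_area_size(area) returns PAGE_SIZE.
 */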

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
                                        unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
                                        unsigned long flags,
                                        unsigned long start, unsigned long end,
                                        const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
        /*
         * This may not tell with 100% certainty whether the area is mapped
         * with > PAGE_SIZE page table entries: if for some reason the
         * architecture indicates larger sizes are available but decides not
         * to use them, nothing prevents that. This only indicates the size
         * of the physical pages allocated in the vmalloc layer.
         */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
        return find_vm_area(addr)->page_order > 0;
#else
        return false;
#endif
}

#ifdef CONFIG_MMU
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
                      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
                         unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
        struct vm_struct *vm = find_vm_area(addr);

        if (vm)
                vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
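
/*
 * Illustrative sketch (not part of the upstream header): code that changes
 * the permissions of a vmalloc'ed region (e.g. to make it read-only and
 * executable) marks it first, so that vfree() resets the direct map and
 * flushes the TLB. set_memory_rox() comes from <linux/set_memory.h>, and
 * "size" is assumed to be page aligned here.
 *
 *      void *text = __vmalloc(size, GFP_KERNEL);
 *
 *      if (text) {
 *              set_vm_flush_reset_perms(text);
 *              set_memory_rox((unsigned long)text, size >> PAGE_SHIFT);
 *      }
 */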

/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 *      Internals.  Don't use..
 */
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                                     const size_t *sizes, int nr_vms,
                                     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
                const size_t *sizes, int nr_vms,
                size_t align)
{
        return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
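
/*
 * Illustrative sketch (not part of the upstream header): a subsystem that
 * caches vmap ranges can drop them when vmap space is being purged. The
 * "foo_*" names are placeholders for the caller's own code.
 *
 *      static int foo_vmap_notify(struct notifier_block *nb,
 *                                 unsigned long action, void *data)
 *      {
 *              foo_drop_cached_mappings();
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_vmap_nb = {
 *              .notifier_call  = foo_vmap_notify,
 *      };
 *
 *      ret = register_vmap_purge_notifier(&foo_vmap_nb);
 *      ...
 *      unregister_vmap_purge_notifier(&foo_vmap_nb);
 */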

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */