/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;	/* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))

extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))

extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))

extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))

extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))

extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))

extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))

extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))

extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))

void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))

extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))

extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))

extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))

extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))

void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
		__realloc_size(2);
#define vrealloc(...)		alloc_hooks(vrealloc_noprof(__VA_ARGS__))

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). It is
 * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
 * is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;

}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags,
					const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not 100% tell if the area is mapped with > PAGE_SIZE
	 * page table entries, if for some reason the architecture indicates
	 * larger sizes are available but decides not to use them, nothing
	 * prevents that. This only indicates the size of the physical page
	 * allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 *	Internals.  Don't use..
 */
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */
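/*
 * Illustrative sketch, not part of the header above: how a driver might use
 * the high-level API. The vmalloc()/vcalloc() wrappers expand through
 * alloc_hooks() so the allocation is attributed to this call site when
 * CONFIG_MEM_ALLOC_PROFILING is enabled, and vcalloc() gives an
 * overflow-checked, zeroed n * size allocation. The struct, function names,
 * and sizes here are hypothetical, chosen only to show the call pattern.
 */
#include <linux/vmalloc.h>
#include <linux/types.h>
#include <linux/errno.h>

struct sample_record {		/* hypothetical payload type */
	u64 key;
	u64 value;
};

static struct sample_record *sample_table;

static int sample_table_init(size_t nr_entries)
{
	/* zeroed, overflow-checked array allocation from vmalloc space */
	sample_table = vcalloc(nr_entries, sizeof(*sample_table));
	if (!sample_table)
		return -ENOMEM;
	return 0;
}

static void sample_table_exit(void)
{
	vfree(sample_table);	/* vfree(NULL) is a no-op, like kfree() */
	sample_table = NULL;
}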