
TOMOYO Linux Cross Reference
Linux/mm/nommu.c


Diff markup

Differences between /mm/nommu.c (Version linux-6.12-rc7) and /mm/nommu.c (Version linux-2.6.0). In the two-column listing below, the left column is linux-6.12-rc7 and the right column is linux-2.6.0; << marks lines present only in the new version, >> marks lines present only in the old version, and !! marks lines that differ between the two. Long lines are truncated to fit the column width.


  1 // SPDX-License-Identifier: GPL-2.0-only       << 
  2 /*                                                  1 /*
  3  *  linux/mm/nommu.c                                2  *  linux/mm/nommu.c
  4  *                                                  3  *
  5  *  Replacement code for mm functions to suppo      4  *  Replacement code for mm functions to support CPU's that don't
  6  *  have any form of memory management unit (t      5  *  have any form of memory management unit (thus no virtual memory).
  7  *                                                  6  *
  8  *  See Documentation/admin-guide/mm/nommu-mma << 
  9  *                                             << 
 10  *  Copyright (c) 2004-2008 David Howells <dho << 
 11  *  Copyright (c) 2000-2003 David McCullough <      7  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 12  *  Copyright (c) 2000-2001 D Jeff Dionne <jef      8  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 13  *  Copyright (c) 2002      Greg Ungerer <gerg      9  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 14  *  Copyright (c) 2007-2010 Paul Mundt <lethal << 
 15  */                                                10  */
 16                                                    11 
 17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt    << 
 18                                                << 
 19 #include <linux/export.h>                      << 
 20 #include <linux/mm.h>                              12 #include <linux/mm.h>
 21 #include <linux/sched/mm.h>                    << 
 22 #include <linux/mman.h>                            13 #include <linux/mman.h>
 23 #include <linux/swap.h>                            14 #include <linux/swap.h>
 24 #include <linux/file.h>                        !!  15 #include <linux/smp_lock.h>
 25 #include <linux/highmem.h>                         16 #include <linux/highmem.h>
 26 #include <linux/pagemap.h>                         17 #include <linux/pagemap.h>
 27 #include <linux/slab.h>                            18 #include <linux/slab.h>
 28 #include <linux/vmalloc.h>                         19 #include <linux/vmalloc.h>
 29 #include <linux/backing-dev.h>                 !!  20 #include <linux/blkdev.h>
 30 #include <linux/compiler.h>                    << 
 31 #include <linux/mount.h>                       << 
 32 #include <linux/personality.h>                 << 
 33 #include <linux/security.h>                    << 
 34 #include <linux/syscalls.h>                    << 
 35 #include <linux/audit.h>                       << 
 36 #include <linux/printk.h>                      << 
 37                                                    21 
 38 #include <linux/uaccess.h>                     !!  22 #include <asm/pgalloc.h>
 39 #include <linux/uio.h>                         !!  23 #include <asm/uaccess.h>
 40 #include <asm/tlb.h>                               24 #include <asm/tlb.h>
 41 #include <asm/tlbflush.h>                          25 #include <asm/tlbflush.h>
 42 #include <asm/mmu_context.h>                   << 
 43 #include "internal.h"                          << 
 44                                                    26 
 45 void *high_memory;                                 27 void *high_memory;
 46 EXPORT_SYMBOL(high_memory);                    !!  28 struct page *mem_map = NULL;
 47 struct page *mem_map;                          << 
 48 unsigned long max_mapnr;                           29 unsigned long max_mapnr;
 49 EXPORT_SYMBOL(max_mapnr);                      !!  30 unsigned long num_physpages;
 50 unsigned long highest_memmap_pfn;              !!  31 unsigned long askedalloc, realalloc;
 51 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIA !!  32 atomic_t vm_committed_space = ATOMIC_INIT(0);
 52 int heap_stack_gap = 0;                        !!  33 int sysctl_overcommit_memory = 0; /* default is heuristic overcommit */
 53                                                !!  34 int sysctl_overcommit_ratio = 50; /* default is 50% */
 54 atomic_long_t mmap_pages_allocated;            !!  35 
 55                                                !!  36 /*
 56 EXPORT_SYMBOL(mem_map);                        !!  37  * Handle all mappings that got truncated by a "truncate()"
 57                                                !!  38  * system call.
 58 /* list of mapped, potentially shareable regio !!  39  *
 59 static struct kmem_cache *vm_region_jar;       !!  40  * NOTE! We have to be ready to update the memory sharing
 60 struct rb_root nommu_region_tree = RB_ROOT;    !!  41  * between the file and the memory map for a potential last
 61 DECLARE_RWSEM(nommu_region_sem);               !!  42  * incomplete page.  Ugly, but necessary.
 62                                                !!  43  */
 63 const struct vm_operations_struct generic_file !!  44 int vmtruncate(struct inode *inode, loff_t offset)
 64 };                                             !!  45 {
                                                   >>  46         struct address_space *mapping = inode->i_mapping;
                                                   >>  47         unsigned long limit;
                                                   >>  48 
                                                   >>  49         if (inode->i_size < offset)
                                                   >>  50                 goto do_expand;
                                                   >>  51         i_size_write(inode, offset);
                                                   >>  52 
                                                   >>  53         truncate_inode_pages(mapping, offset);
                                                   >>  54         goto out_truncate;
                                                   >>  55 
                                                   >>  56 do_expand:
                                                   >>  57         limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
                                                   >>  58         if (limit != RLIM_INFINITY && offset > limit)
                                                   >>  59                 goto out_sig;
                                                   >>  60         if (offset > inode->i_sb->s_maxbytes)
                                                   >>  61                 goto out;
                                                   >>  62         i_size_write(inode, offset);
                                                   >>  63 
                                                   >>  64 out_truncate:
                                                   >>  65         if (inode->i_op && inode->i_op->truncate)
                                                   >>  66                 inode->i_op->truncate(inode);
                                                   >>  67         return 0;
                                                   >>  68 out_sig:
                                                   >>  69         send_sig(SIGXFSZ, current, 0);
                                                   >>  70 out:
                                                   >>  71         return -EFBIG;
                                                   >>  72 }
 65                                                    73 
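
vmtruncate() appears only in the linux-2.6.0 column; later kernels dropped it in favour of per-filesystem truncation helpers. A minimal userspace program that exercises this path on a 2.6-era kernel (the file name is illustrative):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("demo.txt", O_RDWR | O_CREAT, 0644);

                if (fd < 0)
                        return 1;
                /* Shrinking or growing the file lands in vmtruncate(); a
                 * size beyond RLIMIT_FSIZE takes the out_sig path above
                 * (SIGXFSZ, -EFBIG). */
                if (ftruncate(fd, 4096) != 0)
                        perror("ftruncate");
                close(fd);
                return 0;
        }
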
 66 /*                                                 74 /*
 67  * Return the total memory allocated for this      75  * Return the total memory allocated for this pointer, not
 68  * just what the caller asked for.                 76  * just what the caller asked for.
 69  *                                                 77  *
 70  * Doesn't have to be accurate, i.e. may have      78  * Doesn't have to be accurate, i.e. may have races.
 71  */                                                79  */
 72 unsigned int kobjsize(const void *objp)            80 unsigned int kobjsize(const void *objp)
 73 {                                                  81 {
 74         struct page *page;                         82         struct page *page;
 75                                                    83 
 76         /*                                     !!  84         if (!objp || !((page = virt_to_page(objp))))
 77          * If the object we have should not ha << 
 78          * return size of 0                    << 
 79          */                                    << 
 80         if (!objp || !virt_addr_valid(objp))   << 
 81                 return 0;                          85                 return 0;
 82                                                    86 
 83         page = virt_to_head_page(objp);        << 
 84                                                << 
 85         /*                                     << 
 86          * If the allocator sets PageSlab, we  << 
 87          * kmalloc().                          << 
 88          */                                    << 
 89         if (PageSlab(page))                        87         if (PageSlab(page))
 90                 return ksize(objp);                88                 return ksize(objp);
 91                                                    89 
 92         /*                                     !!  90         BUG_ON(page->index < 0);
 93          * If it's not a compound page, see if !!  91         BUG_ON(page->index >= MAX_ORDER);
 94          * region. This test is intentionally  << 
 95          * so if there's no VMA, we still fall << 
 96          * PAGE_SIZE for 0-order pages.        << 
 97          */                                    << 
 98         if (!PageCompound(page)) {             << 
 99                 struct vm_area_struct *vma;    << 
100                                                << 
101                 vma = find_vma(current->mm, (u << 
102                 if (vma)                       << 
103                         return vma->vm_end - v << 
104         }                                      << 
105                                                    92 
106         /*                                     !!  93         return (PAGE_SIZE << page->index);
107          * The ksize() function is only guaran << 
108          * returned by kmalloc(). So handle ar << 
109          */                                    << 
110         return page_size(page);                << 
111 }                                              << 
112                                                << 
113 void vfree(const void *addr)                   << 
114 {                                              << 
115         kfree(addr);                           << 
116 }                                                  94 }
117 EXPORT_SYMBOL(vfree);                          << 
118                                                    95 
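
kobjsize() reports the size of the backing allocation, not the size the caller asked for. A minimal in-kernel sketch, assuming a !MMU build (kobjsize_demo is hypothetical and not part of nommu.c):

        #include <linux/slab.h>
        #include <linux/printk.h>
        #include <linux/mm.h>

        static void kobjsize_demo(void)
        {
                void *p = kmalloc(100, GFP_KERNEL);

                if (p) {
                        /* The PageSlab() branch above forwards to ksize(),
                         * so this may print e.g. 128 after slab rounding. */
                        pr_info("asked for 100, kobjsize says %u\n",
                                kobjsize(p));
                        kfree(p);
                }
        }
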
119 void *__vmalloc_noprof(unsigned long size, gfp !!  96 /*
120 {                                              !!  97  * The nommu dodgy version :-)
121         /*                                     !!  98  */
122          *  You can't specify __GFP_HIGHMEM wi !!  99 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
123          * returns only a logical address.     !! 100         unsigned long start, int len, int write, int force,
124          */                                    !! 101         struct page **pages, struct vm_area_struct **vmas)
125         return kmalloc_noprof(size, (gfp_mask  << 
126 }                                              << 
127 EXPORT_SYMBOL(__vmalloc_noprof);               << 
128                                                << 
129 void *vrealloc_noprof(const void *p, size_t si << 
130 {                                                 102 {
131         return krealloc_noprof(p, size, (flags !! 103         int i;
132 }                                              !! 104         static struct vm_area_struct dummy_vma;
133                                                   105 
134 void *__vmalloc_node_range_noprof(unsigned lon !! 106         for (i = 0; i < len; i++) {
135                 unsigned long start, unsigned  !! 107                 if (pages) {
136                 pgprot_t prot, unsigned long v !! 108                         pages[i] = virt_to_page(start);
137                 const void *caller)            !! 109                         if (pages[i])
138 {                                              !! 110                                 page_cache_get(pages[i]);
139         return __vmalloc_noprof(size, gfp_mask !! 111                 }
                                                   >> 112                 if (vmas)
                                                   >> 113                         vmas[i] = &dummy_vma;
                                                   >> 114                 start += PAGE_SIZE;
                                                   >> 115         }
                                                   >> 116         return(i);
140 }                                                 117 }
141                                                   118 
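
For reference, a 2.6-era caller of the get_user_pages() signature shown in the right-hand column would look roughly like this (gup_demo and uaddr are hypothetical). On !MMU the locking is moot, since the "dodgy version" above just runs virt_to_page() on each page:

        #include <linux/mm.h>
        #include <linux/sched.h>
        #include <linux/pagemap.h>

        static int gup_demo(unsigned long uaddr)
        {
                struct page *pages[1];
                int ret;

                down_read(&current->mm->mmap_sem);
                ret = get_user_pages(current, current->mm, uaddr, 1,
                                     1 /* write */, 0 /* force */,
                                     pages, NULL);
                up_read(&current->mm->mmap_sem);
                if (ret != 1)
                        return -EFAULT;
                /* ... use pages[0] ... */
                page_cache_release(pages[0]);   /* drop the per-page ref */
                return 0;
        }
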
142 void *__vmalloc_node_noprof(unsigned long size !! 119 rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
143                 int node, const void *caller)  !! 120 struct vm_struct *vmlist;
144 {                                              << 
145         return __vmalloc_noprof(size, gfp_mask << 
146 }                                              << 
147                                                   121 
148 static void *__vmalloc_user_flags(unsigned lon !! 122 void vfree(void *addr)
149 {                                                 123 {
150         void *ret;                             !! 124         kfree(addr);
151                                                << 
152         ret = __vmalloc(size, flags);          << 
153         if (ret) {                             << 
154                 struct vm_area_struct *vma;    << 
155                                                << 
156                 mmap_write_lock(current->mm);  << 
157                 vma = find_vma(current->mm, (u << 
158                 if (vma)                       << 
159                         vm_flags_set(vma, VM_U << 
160                 mmap_write_unlock(current->mm) << 
161         }                                      << 
162                                                << 
163         return ret;                            << 
164 }                                                 125 }
165                                                   126 
166 void *vmalloc_user_noprof(unsigned long size)  !! 127 void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
167 {                                                 128 {
168         return __vmalloc_user_flags(size, GFP_ !! 129         /*
                                                   >> 130          * kmalloc doesn't like __GFP_HIGHMEM for some reason
                                                   >> 131          */
                                                   >> 132         return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
169 }                                                 133 }
170 EXPORT_SYMBOL(vmalloc_user_noprof);            << 
171                                                   134 
172 struct page *vmalloc_to_page(const void *addr) !! 135 struct page * vmalloc_to_page(void *addr)
173 {                                                 136 {
174         return virt_to_page(addr);                137         return virt_to_page(addr);
175 }                                                 138 }
176 EXPORT_SYMBOL(vmalloc_to_page);                << 
177                                                   139 
178 unsigned long vmalloc_to_pfn(const void *addr) !! 140 long vread(char *buf, char *addr, unsigned long count)
179 {                                                 141 {
180         return page_to_pfn(virt_to_page(addr)) !! 142         memcpy(buf, addr, count);
                                                   >> 143         return count;
181 }                                                 144 }
182 EXPORT_SYMBOL(vmalloc_to_pfn);                 << 
183                                                   145 
184 long vread_iter(struct iov_iter *iter, const c !! 146 long vwrite(char *buf, char *addr, unsigned long count)
185 {                                                 147 {
186         /* Don't allow overflow */                148         /* Don't allow overflow */
187         if ((unsigned long) addr + count < cou    149         if ((unsigned long) addr + count < count)
188                 count = -(unsigned long) addr;    150                 count = -(unsigned long) addr;
189                                                !! 151         
190         return copy_to_iter(addr, count, iter) !! 152         memcpy(addr, buf, count);
                                                   >> 153         return(count);
191 }                                                 154 }
192                                                   155 
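
Because !MMU "vmalloc" memory is directly addressable, vread_iter() above reduces to a bounds-checked copy_to_iter(). A sketch against the 6.x iov_iter API (vread_demo and src are hypothetical; src is assumed to be a vmalloc()ed buffer):

        #include <linux/uio.h>
        #include <linux/vmalloc.h>

        static long vread_demo(const char *src)
        {
                char buf[64];
                struct kvec kv = { .iov_base = buf, .iov_len = sizeof(buf) };
                struct iov_iter iter;

                iov_iter_kvec(&iter, ITER_DEST, &kv, 1, sizeof(buf));
                return vread_iter(&iter, src, sizeof(buf));
        }
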
193 /*                                                156 /*
194  *      vmalloc  -  allocate virtually contigu !! 157  *      vmalloc  -  allocate virtually continguos memory
195  *                                                158  *
196  *      @size:          allocation size           159  *      @size:          allocation size
197  *                                                160  *
198  *      Allocate enough pages to cover @size f    161  *      Allocate enough pages to cover @size from the page level
199  *      allocator and map them into contiguous !! 162  *      allocator and map them into continguos kernel virtual space.
200  *                                                163  *
201  *      For tight control over page level allo !! 164  *      For tight cotrol over page level allocator and protection flags
202  *      use __vmalloc() instead.                  165  *      use __vmalloc() instead.
203  */                                               166  */
204 void *vmalloc_noprof(unsigned long size)       !! 167 void *vmalloc(unsigned long size)
205 {                                                 168 {
206         return __vmalloc_noprof(size, GFP_KERN !! 169        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
207 }                                                 170 }
208 EXPORT_SYMBOL(vmalloc_noprof);                 << 
209                                                << 
210 void *vmalloc_huge_noprof(unsigned long size,  << 
211                                                   171 
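
On !MMU every vmalloc variant funnels into __vmalloc_noprof() and thus kmalloc(), so the memory is physically contiguous; callers still use the plain names through the allocation-profiling wrappers. A sketch that behaves identically on MMU and !MMU kernels (demo_buf, demo_alloc and demo_free are hypothetical):

        #include <linux/vmalloc.h>
        #include <linux/mm.h>

        static void *demo_buf;

        static int demo_alloc(void)
        {
                demo_buf = vmalloc(8 * PAGE_SIZE);
                if (!demo_buf)
                        return -ENOMEM;
                /* On !MMU this came from kmalloc() and is physically
                 * contiguous; on MMU kernels it is only virtually
                 * contiguous. */
                return 0;
        }

        static void demo_free(void)
        {
                vfree(demo_buf);        /* kfree() in disguise here */
        }
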
212 /*                                                172 /*
213  *      vzalloc - allocate virtually contiguou !! 173  *      vmalloc_32  -  allocate virtually continguos memory (32bit addressable)
214  *                                                174  *
215  *      @size:          allocation size           175  *      @size:          allocation size
216  *                                                176  *
217  *      Allocate enough pages to cover @size f << 
218  *      allocator and map them into contiguous << 
219  *      The memory allocated is set to zero.   << 
220  *                                             << 
221  *      For tight control over page level allo << 
222  *      use __vmalloc() instead.               << 
223  */                                            << 
224 void *vzalloc_noprof(unsigned long size)       << 
225 {                                              << 
226         return __vmalloc_noprof(size, GFP_KERN << 
227 }                                              << 
228 EXPORT_SYMBOL(vzalloc_noprof);                 << 
229                                                << 
230 /**                                            << 
231  * vmalloc_node - allocate memory on a specifi << 
232  * @size:       allocation size                << 
233  * @node:       numa node                      << 
234  *                                             << 
235  * Allocate enough pages to cover @size from t << 
236  * allocator and map them into contiguous kern << 
237  *                                             << 
238  * For tight control over page level allocator << 
239  * use __vmalloc() instead.                    << 
240  */                                            << 
241 void *vmalloc_node_noprof(unsigned long size,  << 
242 {                                              << 
243         return vmalloc_noprof(size);           << 
244 }                                              << 
245 EXPORT_SYMBOL(vmalloc_node_noprof);            << 
246                                                << 
247 /**                                            << 
248  * vzalloc_node - allocate memory on a specifi << 
249  * @size:       allocation size                << 
250  * @node:       numa node                      << 
251  *                                             << 
252  * Allocate enough pages to cover @size from t << 
253  * allocator and map them into contiguous kern << 
254  * The memory allocated is set to zero.        << 
255  *                                             << 
256  * For tight control over page level allocator << 
257  * use __vmalloc() instead.                    << 
258  */                                            << 
259 void *vzalloc_node_noprof(unsigned long size,  << 
260 {                                              << 
261         return vzalloc_noprof(size);           << 
262 }                                              << 
263 EXPORT_SYMBOL(vzalloc_node_noprof);            << 
264                                                << 
265 /**                                            << 
266  * vmalloc_32  -  allocate virtually contiguou << 
267  *      @size:          allocation size        << 
268  *                                             << 
269  *      Allocate enough 32bit PA addressable p    177  *      Allocate enough 32bit PA addressable pages to cover @size from the
270  *      page level allocator and map them into !! 178  *      page level allocator and map them into continguos kernel virtual space.
271  */                                            << 
272 void *vmalloc_32_noprof(unsigned long size)    << 
273 {                                              << 
274         return __vmalloc_noprof(size, GFP_KERN << 
275 }                                              << 
276 EXPORT_SYMBOL(vmalloc_32_noprof);              << 
277                                                << 
278 /**                                            << 
279  * vmalloc_32_user - allocate zeroed virtually << 
280  *      @size:          allocation size        << 
281  *                                             << 
282  * The resulting memory area is 32bit addressa << 
283  * mapped to userspace without leaking data.   << 
284  *                                             << 
285  * VM_USERMAP is set on the corresponding VMA  << 
286  * remap_vmalloc_range() are permissible.      << 
287  */                                               179  */
288 void *vmalloc_32_user_noprof(unsigned long siz !! 180 void *vmalloc_32(unsigned long size)
289 {                                                 181 {
290         /*                                     !! 182         return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
291          * We'll have to sort out the ZONE_DMA << 
292          * but for now this can simply use vma << 
293          */                                    << 
294         return vmalloc_user_noprof(size);      << 
295 }                                                 183 }
296 EXPORT_SYMBOL(vmalloc_32_user_noprof);         << 
297                                                   184 
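
The VM_USERMAP flag set in __vmalloc_user_flags() above is what later permits mapping the buffer into userspace. A sketch of a driver ->mmap handler, assuming demo_buf was allocated with vmalloc_user() (both names hypothetical):

        #include <linux/vmalloc.h>
        #include <linux/fs.h>
        #include <linux/mm.h>

        static void *demo_buf;  /* assumed: a vmalloc_user() allocation */

        static int demo_mmap(struct file *file, struct vm_area_struct *vma)
        {
                /* Fails with -EINVAL unless VM_USERMAP was set at
                 * allocation time. */
                return remap_vmalloc_range(vma, demo_buf, 0);
        }
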
298 void *vmap(struct page **pages, unsigned int c    185 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
299 {                                                 186 {
300         BUG();                                    187         BUG();
301         return NULL;                              188         return NULL;
302 }                                                 189 }
303 EXPORT_SYMBOL(vmap);                           << 
304                                                   190 
305 void vunmap(const void *addr)                  !! 191 void vunmap(void *addr)
306 {                                                 192 {
307         BUG();                                    193         BUG();
308 }                                                 194 }
309 EXPORT_SYMBOL(vunmap);                         << 
310                                                << 
311 void *vm_map_ram(struct page **pages, unsigned << 
312 {                                              << 
313         BUG();                                 << 
314         return NULL;                           << 
315 }                                              << 
316 EXPORT_SYMBOL(vm_map_ram);                     << 
317                                                << 
318 void vm_unmap_ram(const void *mem, unsigned in << 
319 {                                              << 
320         BUG();                                 << 
321 }                                              << 
322 EXPORT_SYMBOL(vm_unmap_ram);                   << 
323                                                << 
324 void vm_unmap_aliases(void)                    << 
325 {                                              << 
326 }                                              << 
327 EXPORT_SYMBOL_GPL(vm_unmap_aliases);           << 
328                                                << 
329 void free_vm_area(struct vm_struct *area)      << 
330 {                                              << 
331         BUG();                                 << 
332 }                                              << 
333 EXPORT_SYMBOL_GPL(free_vm_area);               << 
334                                                << 
335 int vm_insert_page(struct vm_area_struct *vma, << 
336                    struct page *page)          << 
337 {                                              << 
338         return -EINVAL;                        << 
339 }                                              << 
340 EXPORT_SYMBOL(vm_insert_page);                 << 
341                                                << 
342 int vm_insert_pages(struct vm_area_struct *vma << 
343                         struct page **pages, u << 
344 {                                              << 
345         return -EINVAL;                        << 
346 }                                              << 
347 EXPORT_SYMBOL(vm_insert_pages);                << 
348                                                << 
349 int vm_map_pages(struct vm_area_struct *vma, s << 
350                         unsigned long num)     << 
351 {                                              << 
352         return -EINVAL;                        << 
353 }                                              << 
354 EXPORT_SYMBOL(vm_map_pages);                   << 
355                                                << 
356 int vm_map_pages_zero(struct vm_area_struct *v << 
357                                 unsigned long  << 
358 {                                              << 
359         return -EINVAL;                        << 
360 }                                              << 
361 EXPORT_SYMBOL(vm_map_pages_zero);              << 
362                                                   195 
363 /*                                                196 /*
364  *  sys_brk() for the most part doesn't need t    197  *  sys_brk() for the most part doesn't need the global kernel
365  *  lock, except when an application is doing     198  *  lock, except when an application is doing something nasty
366  *  like trying to un-brk an area that has alr    199  *  like trying to un-brk an area that has already been mapped
367  *  to a regular file.  in this case, the unma    200  *  to a regular file.  in this case, the unmapping will need
368  *  to invoke file system routines that need t    201  *  to invoke file system routines that need the global lock.
369  */                                               202  */
370 SYSCALL_DEFINE1(brk, unsigned long, brk)       !! 203 asmlinkage unsigned long sys_brk(unsigned long brk)
371 {                                                 204 {
372         struct mm_struct *mm = current->mm;       205         struct mm_struct *mm = current->mm;
373                                                   206 
374         if (brk < mm->start_brk || brk > mm->c !! 207         if (brk < mm->end_code || brk < mm->start_brk || brk > mm->context.end_brk)
375                 return mm->brk;                   208                 return mm->brk;
376                                                   209 
377         if (mm->brk == brk)                       210         if (mm->brk == brk)
378                 return mm->brk;                   211                 return mm->brk;
379                                                   212 
380         /*                                        213         /*
381          * Always allow shrinking brk             214          * Always allow shrinking brk
382          */                                       215          */
383         if (brk <= mm->brk) {                     216         if (brk <= mm->brk) {
384                 mm->brk = brk;                    217                 mm->brk = brk;
385                 return brk;                       218                 return brk;
386         }                                         219         }
387                                                   220 
388         /*                                        221         /*
389          * Ok, looks good - let it rip.           222          * Ok, looks good - let it rip.
390          */                                       223          */
391         flush_icache_user_range(mm->brk, brk); << 
392         return mm->brk = brk;                     224         return mm->brk = brk;
393 }                                                 225 }
394                                                   226 
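
The net effect of this sys_brk() is that the break may move freely within, but never beyond, the region reserved at exec time (mm->start_brk up to mm->context.end_brk in the old column). A runnable userspace probe:

        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                void *cur = sbrk(0);    /* current program break */

                printf("brk at %p\n", cur);
                /* On !MMU a grow request beyond the reserved region
                 * fails where it would succeed on an MMU kernel. */
                if (sbrk(4096) == (void *)-1)
                        perror("sbrk");
                return 0;
        }
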
395 /*                                                227 /*
396  * initialise the percpu counter for VM and re !! 228  * Combine the mmap "prot" and "flags" argument into one "vm_flags" used
397  */                                            !! 229  * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
398 void __init mmap_init(void)                    !! 230  * into "VM_xxx".
399 {                                              !! 231  */
400         int ret;                               !! 232 static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
401                                                !! 233 {
402         ret = percpu_counter_init(&vm_committe !! 234 #define _trans(x,bit1,bit2) \
403         VM_BUG_ON(ret);                        !! 235 ((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
404         vm_region_jar = KMEM_CACHE(vm_region,  !! 236 
405 }                                              !! 237         unsigned long prot_bits, flag_bits;
406                                                !! 238         prot_bits =
407 /*                                             !! 239                 _trans(prot, PROT_READ, VM_READ) |
408  * validate the region tree                    !! 240                 _trans(prot, PROT_WRITE, VM_WRITE) |
409  * - the caller must hold the region lock      !! 241                 _trans(prot, PROT_EXEC, VM_EXEC);
410  */                                            !! 242         flag_bits =
411 #ifdef CONFIG_DEBUG_NOMMU_REGIONS              !! 243                 _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
412 static noinline void validate_nommu_regions(vo !! 244                 _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
413 {                                              !! 245                 _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
414         struct vm_region *region, *last;       !! 246         return prot_bits | flag_bits;
415         struct rb_node *p, *lastp;             !! 247 #undef _trans
416                                                !! 248 }
417         lastp = rb_first(&nommu_region_tree);  !! 249 
418         if (!lastp)                            !! 250 #ifdef DEBUG
419                 return;                        !! 251 static void show_process_blocks(void)
420                                                !! 252 {
421         last = rb_entry(lastp, struct vm_regio !! 253         struct mm_tblock_struct *tblock;
422         BUG_ON(last->vm_end <= last->vm_start) !! 254 
423         BUG_ON(last->vm_top < last->vm_end);   !! 255         printk("Process blocks %d:", current->pid);
424                                                !! 256 
425         while ((p = rb_next(lastp))) {         !! 257         for (tblock = &current->mm->context.tblock; tblock; tblock = tblock->next) {
426                 region = rb_entry(p, struct vm !! 258                 printk(" %p: %p", tblock, tblock->rblock);
427                 last = rb_entry(lastp, struct  !! 259                 if (tblock->rblock)
428                                                !! 260                         printk(" (%d @%p #%d)", kobjsize(tblock->rblock->kblock), tblock->rblock->kblock, tblock->rblock->refcount);
429                 BUG_ON(region->vm_end <= regio !! 261                 printk(tblock->next ? " ->" : ".\n");
430                 BUG_ON(region->vm_top < region !! 262         }
431                 BUG_ON(region->vm_start < last !! 263 }
432                                                !! 264 #endif /* DEBUG */
433                 lastp = p;                     !! 265 
434         }                                      !! 266 unsigned long do_mmap_pgoff(
435 }                                              !! 267         struct file * file,
436 #else                                          !! 268         unsigned long addr,
437 static void validate_nommu_regions(void)       !! 269         unsigned long len,
438 {                                              !! 270         unsigned long prot,
439 }                                              !! 271         unsigned long flags,
440 #endif                                         !! 272         unsigned long pgoff)
441                                                !! 273 {
442 /*                                             !! 274         void * result;
443  * add a region into the global tree           !! 275         struct mm_tblock_struct * tblock;
444  */                                            !! 276         unsigned int vm_flags;
445 static void add_nommu_region(struct vm_region  << 
446 {                                              << 
447         struct vm_region *pregion;             << 
448         struct rb_node **p, *parent;           << 
449                                                << 
450         validate_nommu_regions();              << 
451                                                << 
452         parent = NULL;                         << 
453         p = &nommu_region_tree.rb_node;        << 
454         while (*p) {                           << 
455                 parent = *p;                   << 
456                 pregion = rb_entry(parent, str << 
457                 if (region->vm_start < pregion << 
458                         p = &(*p)->rb_left;    << 
459                 else if (region->vm_start > pr << 
460                         p = &(*p)->rb_right;   << 
461                 else if (pregion == region)    << 
462                         return;                << 
463                 else                           << 
464                         BUG();                 << 
465         }                                      << 
466                                                << 
467         rb_link_node(&region->vm_rb, parent, p << 
468         rb_insert_color(&region->vm_rb, &nommu << 
469                                                << 
470         validate_nommu_regions();              << 
471 }                                              << 
472                                                << 
473 /*                                             << 
474  * delete a region from the global tree        << 
475  */                                            << 
476 static void delete_nommu_region(struct vm_regi << 
477 {                                              << 
478         BUG_ON(!nommu_region_tree.rb_node);    << 
479                                                << 
480         validate_nommu_regions();              << 
481         rb_erase(&region->vm_rb, &nommu_region << 
482         validate_nommu_regions();              << 
483 }                                              << 
484                                                << 
485 /*                                             << 
486  * free a contiguous series of pages           << 
487  */                                            << 
488 static void free_page_series(unsigned long fro << 
489 {                                              << 
490         for (; from < to; from += PAGE_SIZE) { << 
491                 struct page *page = virt_to_pa << 
492                                                << 
493                 atomic_long_dec(&mmap_pages_al << 
494                 put_page(page);                << 
495         }                                      << 
496 }                                              << 
497                                                << 
498 /*                                             << 
499  * release a reference to a region             << 
500  * - the caller must hold the region semaphore << 
501  * - the region may not have been added to the << 
502  *   will equal vm_start                       << 
503  */                                            << 
504 static void __put_nommu_region(struct vm_regio << 
505         __releases(nommu_region_sem)           << 
506 {                                              << 
507         BUG_ON(!nommu_region_tree.rb_node);    << 
508                                                << 
509         if (--region->vm_usage == 0) {         << 
510                 if (region->vm_top > region->v << 
511                         delete_nommu_region(re << 
512                 up_write(&nommu_region_sem);   << 
513                                                << 
514                 if (region->vm_file)           << 
515                         fput(region->vm_file); << 
516                                                << 
517                 /* IO memory and memory shared << 
518                  * from ramfs/tmpfs mustn't be << 
519                 if (region->vm_flags & VM_MAPP << 
520                         free_page_series(regio << 
521                 kmem_cache_free(vm_region_jar, << 
522         } else {                               << 
523                 up_write(&nommu_region_sem);   << 
524         }                                      << 
525 }                                              << 
526                                                << 
527 /*                                             << 
528  * release a reference to a region             << 
529  */                                            << 
530 static void put_nommu_region(struct vm_region  << 
531 {                                              << 
532         down_write(&nommu_region_sem);         << 
533         __put_nommu_region(region);            << 
534 }                                              << 
535                                                << 
536 static void setup_vma_to_mm(struct vm_area_str << 
537 {                                              << 
538         vma->vm_mm = mm;                       << 
539                                                << 
540         /* add the VMA to the mapping */       << 
541         if (vma->vm_file) {                    << 
542                 struct address_space *mapping  << 
543                                                << 
544                 i_mmap_lock_write(mapping);    << 
545                 flush_dcache_mmap_lock(mapping << 
546                 vma_interval_tree_insert(vma,  << 
547                 flush_dcache_mmap_unlock(mappi << 
548                 i_mmap_unlock_write(mapping);  << 
549         }                                      << 
550 }                                              << 
551                                                << 
552 static void cleanup_vma_from_mm(struct vm_area << 
553 {                                              << 
554         vma->vm_mm->map_count--;               << 
555         /* remove the VMA from the mapping */  << 
556         if (vma->vm_file) {                    << 
557                 struct address_space *mapping; << 
558                 mapping = vma->vm_file->f_mapp << 
559                                                << 
560                 i_mmap_lock_write(mapping);    << 
561                 flush_dcache_mmap_lock(mapping << 
562                 vma_interval_tree_remove(vma,  << 
563                 flush_dcache_mmap_unlock(mappi << 
564                 i_mmap_unlock_write(mapping);  << 
565         }                                      << 
566 }                                              << 
567                                                   277 
568 /*                                             !! 278         /*
569  * delete a VMA from its owning mm_struct and  !! 279          * Get the !CONFIG_MMU specific checks done first
570  */                                            !! 280          */
571 static int delete_vma_from_mm(struct vm_area_s !! 281         if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file)) {
572 {                                              !! 282                 printk("MAP_SHARED not supported (cannot write mappings to disk)\n");
573         VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_ << 
574                                                << 
575         vma_iter_config(&vmi, vma->vm_start, v << 
576         if (vma_iter_prealloc(&vmi, vma)) {    << 
577                 pr_warn("Allocation of vma tre << 
578                        current->pid);          << 
579                 return -ENOMEM;                << 
580         }                                      << 
581         cleanup_vma_from_mm(vma);              << 
582                                                << 
583         /* remove from the MM's tree and list  << 
584         vma_iter_clear(&vmi);                  << 
585         return 0;                              << 
586 }                                              << 
587 /*                                             << 
588  * destroy a VMA record                        << 
589  */                                            << 
590 static void delete_vma(struct mm_struct *mm, s << 
591 {                                              << 
592         vma_close(vma);                        << 
593         if (vma->vm_file)                      << 
594                 fput(vma->vm_file);            << 
595         put_nommu_region(vma->vm_region);      << 
596         vm_area_free(vma);                     << 
597 }                                              << 
598                                                << 
599 struct vm_area_struct *find_vma_intersection(s << 
600                                              u << 
601                                              u << 
602 {                                              << 
603         unsigned long index = start_addr;      << 
604                                                << 
605         mmap_assert_locked(mm);                << 
606         return mt_find(&mm->mm_mt, &index, end << 
607 }                                              << 
608 EXPORT_SYMBOL(find_vma_intersection);          << 
609                                                << 
610 /*                                             << 
611  * look up the first VMA in which addr resides << 
612  * - should be called with mm->mmap_lock at le << 
613  */                                            << 
614 struct vm_area_struct *find_vma(struct mm_stru << 
615 {                                              << 
616         VMA_ITERATOR(vmi, mm, addr);           << 
617                                                << 
618         return vma_iter_load(&vmi);            << 
619 }                                              << 
620 EXPORT_SYMBOL(find_vma);                       << 
621                                                << 
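
find_vma() keeps its usual contract here, backed by the VMA iterator. A minimal sketch of the canonical lookup pattern (addr_is_mapped is hypothetical); the containment check matters on MMU kernels, where find_vma() may return a VMA that begins above addr:

        #include <linux/mm.h>

        static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
        {
                struct vm_area_struct *vma;
                bool mapped;

                mmap_read_lock(mm);
                vma = find_vma(mm, addr);
                mapped = vma && addr >= vma->vm_start;
                mmap_read_unlock(mm);
                return mapped;
        }
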
622 /*                                             << 
623  * At least xtensa ends up having protection f << 
624  * MMU.. No stack expansion, at least.         << 
625  */                                            << 
626 struct vm_area_struct *lock_mm_and_find_vma(st << 
627                         unsigned long addr, st << 
628 {                                              << 
629         struct vm_area_struct *vma;            << 
630                                                << 
631         mmap_read_lock(mm);                    << 
632         vma = vma_lookup(mm, addr);            << 
633         if (!vma)                              << 
634                 mmap_read_unlock(mm);          << 
635         return vma;                            << 
636 }                                              << 
637                                                << 
638 /*                                             << 
639  * expand a stack to a given address           << 
640  * - not supported under NOMMU conditions      << 
641  */                                            << 
642 int expand_stack_locked(struct vm_area_struct  << 
643 {                                              << 
644         return -ENOMEM;                        << 
645 }                                              << 
646                                                << 
647 struct vm_area_struct *expand_stack(struct mm_ << 
648 {                                              << 
649         mmap_read_unlock(mm);                  << 
650         return NULL;                           << 
651 }                                              << 
652                                                << 
653 /*                                             << 
654  * look up the first VMA exactly that exactly  << 
655  * - should be called with mm->mmap_lock at le << 
656  */                                            << 
657 static struct vm_area_struct *find_vma_exact(s << 
658                                              u << 
659                                              u << 
660 {                                              << 
661         struct vm_area_struct *vma;            << 
662         unsigned long end = addr + len;        << 
663         VMA_ITERATOR(vmi, mm, addr);           << 
664                                                << 
665         vma = vma_iter_load(&vmi);             << 
666         if (!vma)                              << 
667                 return NULL;                   << 
668         if (vma->vm_start != addr)             << 
669                 return NULL;                   << 
670         if (vma->vm_end != end)                << 
671                 return NULL;                   << 
672                                                << 
673         return vma;                            << 
674 }                                              << 
675                                                << 
676 /*                                             << 
677  * determine whether a mapping should be permi << 
678  * mapping we're capable of supporting         << 
679  */                                            << 
680 static int validate_mmap_request(struct file * << 
681                                  unsigned long << 
682                                  unsigned long << 
683                                  unsigned long << 
684                                  unsigned long << 
685                                  unsigned long << 
686                                  unsigned long << 
687 {                                              << 
688         unsigned long capabilities, rlen;      << 
689         int ret;                               << 
690                                                << 
691         /* do the simple checks first */       << 
692         if (flags & MAP_FIXED)                 << 
693                 return -EINVAL;                << 
694                                                << 
695         if ((flags & MAP_TYPE) != MAP_PRIVATE  << 
696             (flags & MAP_TYPE) != MAP_SHARED)  << 
697                 return -EINVAL;                << 
698                                                << 
699         if (!len)                              << 
700                 return -EINVAL;                   283                 return -EINVAL;
701                                                << 
702         /* Careful about overflows.. */        << 
703         rlen = PAGE_ALIGN(len);                << 
704         if (!rlen || rlen > TASK_SIZE)         << 
705                 return -ENOMEM;                << 
706                                                << 
707         /* offset overflow? */                 << 
708         if ((pgoff + (rlen >> PAGE_SHIFT)) < p << 
709                 return -EOVERFLOW;             << 
710                                                << 
711         if (file) {                            << 
712                 /* files must support mmap */  << 
713                 if (!file->f_op->mmap)         << 
714                         return -ENODEV;        << 
715                                                << 
716                 /* work out if what we've got  << 
717                  * - we support chardevs that  << 
718                  * - we support files/blockdev << 
719                  */                            << 
720                 if (file->f_op->mmap_capabilit << 
721                         capabilities = file->f << 
722                 } else {                       << 
723                         /* no explicit capabil << 
724                          * defaults */         << 
725                         switch (file_inode(fil << 
726                         case S_IFREG:          << 
727                         case S_IFBLK:          << 
728                                 capabilities = << 
729                                 break;         << 
730                                                << 
731                         case S_IFCHR:          << 
732                                 capabilities = << 
733                                         NOMMU_ << 
734                                         NOMMU_ << 
735                                         NOMMU_ << 
736                                 break;         << 
737                                                << 
738                         default:               << 
739                                 return -EINVAL << 
740                         }                      << 
741                 }                              << 
742                                                << 
743                 /* eliminate any capabilities  << 
744                  * device */                   << 
745                 if (!file->f_op->get_unmapped_ << 
746                         capabilities &= ~NOMMU << 
747                 if (!(file->f_mode & FMODE_CAN << 
748                         capabilities &= ~NOMMU << 
749                                                << 
750                 /* The file shall have been op << 
751                 if (!(file->f_mode & FMODE_REA << 
752                         return -EACCES;  <<
753  <<
754                 if (flags & MAP_SHARED) {  <<
755                         /* do checks for writing, appending and locking */  <<
756                         if ((prot & PROT_WRITE) &&  <<
757                             !(file->f_mode & FMODE_WRITE))  <<
758                                 return -EACCES;  <<
759  <<
760                         if (IS_APPEND(file_inode(file)) &&  <<
761                             (file->f_mode & FMODE_WRITE))  <<
762                                 return -EACCES;  <<
763  <<
764                         if (!(capabilities & NOMMU_MAP_DIRECT))  <<
765                                 return -ENODEV;  <<
766  <<
767                         /* we mustn't privatise shared mappings */  <<
768                         capabilities &= ~NOMMU_MAP_COPY;  <<
769                 } else {  <<
770                         /* we're going to read the file into private memory we  <<
771                          * allocate */  <<
772                         if (!(capabilities & NOMMU_MAP_COPY))  <<
773                                 return -ENODEV;  <<
774  <<
775                         /* we don't permit a private writable mapping to be  <<
776                          * shared with the backing device */  <<
777                         if (prot & PROT_WRITE)  <<
778                                 capabilities &= ~NOMMU_MAP_DIRECT;  <<
779                 }  <<
780  <<
781                 if (capabilities & NOMMU_MAP_DIRECT) {  <<
782                         if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||  <<
783                             ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||  <<
784                             ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))  <<
785                             ) {  <<
786                                 capabilities &= ~NOMMU_MAP_DIRECT;  <<
787                                 if (flags & MAP_SHARED) {  <<
788                                         pr_warn("MAP_SHARED not completely supported on !MMU\n");  <<
789                                         return -EINVAL;  <<
790                                 }  <<
791                         }  <<
792                 }  <<
793  <<
794                 /* handle executable mappings and implied executable  <<
795                  * mappings */  <<
796                 if (path_noexec(&file->f_path)) {  <<
797                         if (prot & PROT_EXEC)  <<
798                                 return -EPERM;  <<
799                 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {  <<
800                         /* handle implication of PROT_EXEC by PROT_READ */  <<
801                         if (current->personality & READ_IMPLIES_EXEC) {  <<
802                                 if (capabilities & NOMMU_MAP_EXEC)  <<
803                                         prot |= PROT_EXEC;  <<
804                         }  <<
805                 } else if ((prot & PROT_READ) &&  <<
806                          (prot & PROT_EXEC) &&  <<
807                          !(capabilities & NOMMU_MAP_EXEC)  <<
808                          ) {  <<
809                         /* backing file is not executable, try to copy */  <<
810                         capabilities &= ~NOMMU_MAP_DIRECT;  <<
811                 }  <<
812         } else {  <<
813                 /* anonymous mappings are always memory backed and can be  <<
814                  * privately mapped  <<
815                  */  <<
816                 capabilities = NOMMU_MAP_COPY;  <<
817  <<
818                 /* handle PROT_EXEC implication by PROT_READ */  <<
819                 if ((prot & PROT_READ) &&  <<
820                     (current->personality & READ_IMPLIES_EXEC))  <<
821                         prot |= PROT_EXEC;  <<
822         }  <<
823  <<
824         /* allow the security API to have its say */  <<
825         ret = security_mmap_addr(addr);  <<
826         if (ret < 0)  <<
827                 return ret;  <<
828  <<
829         /* looks okay */  <<
830         *_capabilities = capabilities;  <<
831         return 0;  <<
832 }  <<
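
The net effect of validate_mmap_request() is visible from userspace. A minimal sketch of the two commonest rejections (illustrative only: it assumes a !MMU target and a regular file opened O_RDONLY, and checks the errno values assigned above):

#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
        int fd = open("/etc/hostname", O_RDONLY);       /* any regular file */

        /* PROT_WRITE + MAP_SHARED on an O_RDONLY file: the FMODE_WRITE
         * check above fails the request with EACCES */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED && errno == EACCES)
                printf("read-only file: EACCES as expected\n");

        /* MAP_SHARED where the backing store cannot be mapped directly
         * (no NOMMU_MAP_DIRECT capability): ENODEV */
        p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED && errno == ENODEV)
                printf("no direct-mapping support: ENODEV\n");
        return 0;
}
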
833                                                << 
834 /*  <<
835  * we've determined that we can make the mapping, now translate what we  <<
836  * now know into VMA flags  <<
837  */  <<
838 static unsigned long determine_vm_flags(struct file *file,  <<
839                                         unsigned long prot,  <<
840                                         unsigned long flags,  <<
841                                         unsigned long capabilities)  <<
842 {  <<
843         unsigned long vm_flags;  <<
844  <<
845         vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);  <<
846  <<
847         if (!file) {  <<
848                 /*  <<
849                  * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because  <<
850                  * there is no fork().  <<
851                  */  <<
852                 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;  <<
853         } else if (flags & MAP_PRIVATE) {  <<
854                 /* MAP_PRIVATE file mapping */  <<
855                 if (capabilities & NOMMU_MAP_DIRECT)  <<
856                         vm_flags |= (capabilities & NOMMU_VMFLAGS);  <<
857                 else  <<
858                         vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;  <<
859  <<
860                 if (!(prot & PROT_WRITE) && !current->ptrace)  <<
861                         /*  <<
862                          * R/O private file mapping which cannot be used to  <<
863                          * modify memory, especially also not via active ptrace  <<
864                          * (e.g., set breakpoints) or later by upgrading  <<
865                          * permissions (no mprotect()). We can try overlaying  <<
866                          * the file mapping, which will work e.g., on chardevs,  <<
867                          * ramfs/tmpfs/shmfs and romfs/cramfs.  <<
868                          */  <<
869                         vm_flags |= VM_MAYOVERLAY;  <<
870         } else {  <<
871                 /* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */  <<
872                 vm_flags |= VM_SHARED | VM_MAYSHARE |  <<
873                             (capabilities & NOMMU_VMFLAGS);  <<
874         }                                         284         }
875                                                !! 285         
876         return vm_flags;                       !! 286         if ((prot & PROT_WRITE) && (flags & MAP_PRIVATE)) {
877 }                                              !! 287                 printk("Private writable mappings not supported\n");
878                                                !! 288                 return -EINVAL;
879 /*  <<
880  * set up a shared mapping on a file (the driver or filesystem provides and  <<
881  * pins the storage)  <<
882  */  <<
883 static int do_mmap_shared_file(struct vm_area_struct *vma)  <<
884 {  <<
885         int ret;  <<
886  <<
887         ret = mmap_file(vma->vm_file, vma);  <<
888         if (ret == 0) {  <<
889                 vma->vm_region->vm_top = vma->vm_region->vm_end;  <<
890                 return 0;  <<
891         }                                            289         }
892         if (ret != -ENOSYS)                    !! 290         
893                 return ret;  <<
894  <<
895         /* getting -ENOSYS indicates that direct mmap isn't possible (as  <<
896          * opposed to tried but failed) so we can't use it; we also know that  <<
897          * it's not possible to make a private copy of a shared mapping */  <<
898         return -ENODEV;  <<
899 }  <<
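
-ENOSYS is the convention by which a driver tells do_mmap() that a direct mapping is impossible (as opposed to tried but failed). A hypothetical nommu chardev might implement the pair of hooks like this (mydev_buf, mydev_size and both function names are assumptions for the sketch, not anything in this file):

/* hypothetical device state -- assumptions for the sketch */
static void *mydev_buf;
static size_t mydev_size;

/* the device tells do_mmap() where its memory lives ... */
static unsigned long mydev_get_unmapped_area(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        if (len > mydev_size)
                return -ENOSYS;                 /* direct mapping impossible */
        return (unsigned long)mydev_buf;        /* share the buffer itself */
}

/* ... and the mmap handler merely confirms (or refuses) the request */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -ENOSYS;
}
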
900                                                << 
901 /*  <<
902  * set up a private mapping or an anonymous shared mapping  <<
903  */  <<
904 static int do_mmap_private(struct vm_area_struct *vma,  <<
905                            struct vm_region *region,  <<
906                            unsigned long len,  <<
907                            unsigned long capabilities)  <<
908 {  <<
909         unsigned long total, point;  <<
910         void *base;  <<
911         int ret, order;  <<
912  <<
913         /*                                           291         /*
914          * Invoke the file's mapping function so that it can keep track of  !! 292          *      now all the standard checks
915          * shared mappings on devices or memory. VM_MAYOVERLAY will be set if  <<
916          * it may attempt to share, which will make is_nommu_shared_mapping()  <<
917          * happy.  <<
918          */  <<
919         if (capabilities & NOMMU_MAP_DIRECT) {  <<
920                 ret = mmap_file(vma->vm_file, vma);  <<
921                 /* shouldn't return success if we're not sharing */  <<
922                 if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))  <<
923                         ret = -ENOSYS;  <<
924                 if (ret == 0) {  <<
925                         vma->vm_region->vm_top = vma->vm_region->vm_end;  <<
926                         return 0;  <<
927                 }  <<
928                 if (ret != -ENOSYS)  <<
929                         return ret;  <<
930  <<
931                 /* getting an ENOSYS error indicates that direct mmap isn't  <<
932                  * possible (as opposed to tried but failed) so we'll try to  <<
933                  * make a private copy of the data and map that instead */  <<
934         }  <<
935  <<
936  <<
937         /* allocate some memory to hold the mapping  <<
938          * - note that this may not return a page-aligned address if the object  <<
939          *   we're allocating is smaller than a page  <<
940          */                                          293          */
941         order = get_order(len);                 !! 294         if (file && (!file->f_op || !file->f_op->mmap))
942         total = 1 << order;                     !! 295                 return -ENODEV;
943         point = len >> PAGE_SHIFT;  <<
944  <<
945         /* we don't want to allocate a power-of-2 sized page set */  <<
946         if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)  <<
947                 total = point;  <<
948  <<
949         base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);  <<
950         if (!base)  <<
951                 goto enomem;  <<
952  <<
953         atomic_long_add(total, &mmap_pages_allocated);  <<
954  <<
955         vm_flags_set(vma, VM_MAPPED_COPY);  <<
956         region->vm_flags = vma->vm_flags;  <<
957         region->vm_start = (unsigned long) base;  <<
958         region->vm_end   = region->vm_start + len;  <<
959         region->vm_top   = region->vm_start + (total << PAGE_SHIFT);  <<
960  <<
961         vma->vm_start = region->vm_start;  <<
962         vma->vm_end   = region->vm_start + len;  <<
963  <<
964         if (vma->vm_file) {  <<
965                 /* read the contents of a file into the copy */  <<
966                 loff_t fpos;  <<
967  <<
968                 fpos = vma->vm_pgoff;  <<
969                 fpos <<= PAGE_SHIFT;  <<
970  <<
971                 ret = kernel_read(vma->vm_file, base, len, &fpos);  <<
972                 if (ret < 0)  <<
973                         goto error_free;  <<
974  <<
975                 /* clear the last little bit */  <<
976                 if (ret < len)  <<
977                         memset(base + ret, 0, len - ret);  <<
978  <<
979         } else {  <<
980                 vma_set_anonymous(vma);  <<
981         }  <<
982  <<
983         return 0;  <<
984  <<
985 error_free:  <<
986         free_page_series(region->vm_start, region->vm_top);  <<
987         region->vm_start = vma->vm_start = 0;  <<
988         region->vm_end   = vma->vm_end = 0;  <<
989         region->vm_top   = 0;  <<
990         return ret;  <<
991  <<
992 enomem:  <<
993         pr_err("Allocation of length %lu from process %d (%s) failed\n",  <<
994                len, current->pid, current->comm);  <<
995         show_mem();  <<
996         return -ENOMEM;  <<
997 }  <<
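
The order/point arithmetic above is what keeps a copy mapping from always costing a power-of-two number of pages. A stand-alone sketch of the same calculation (the 4KiB PAGE_SHIFT, the local get_order() and the default sysctl value of 1 are assumptions chosen for the example):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long get_order(unsigned long len)
{
        unsigned long order = 0;

        len = (len - 1) >> PAGE_SHIFT;
        while (len) {
                order++;
                len >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long len = 3 * PAGE_SIZE;              /* 12KiB request */
        unsigned long total = 1UL << get_order(len);    /* 4 pages */
        unsigned long point = len >> PAGE_SHIFT;        /* 3 pages */
        unsigned long nr_trim = 1;                      /* sysctl default */

        if (nr_trim && total - point >= nr_trim)
                total = point;  /* excess page goes back to the system */

        printf("allocated %lu pages for a %lu-page request\n", total, point);
        return 0;
}
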
998                                                << 
999 /*  <<
1000  * handle mapping creation for uClinux  <<
1001  */  <<
1002 unsigned long do_mmap(struct file *file,  <<
1003                         unsigned long addr,  <<
1004                         unsigned long len,  <<
1005                         unsigned long prot,  <<
1006                         unsigned long flags,  <<
1007                         vm_flags_t vm_flags,  <<
1008                         unsigned long pgoff,  <<
1009                         unsigned long *populate,  <<
1010                         struct list_head *uf)  <<
1011 {  <<
1012         struct vm_area_struct *vma;  <<
1013         struct vm_region *region;  <<
1014         struct rb_node *rb;  <<
1015         unsigned long capabilities, result;  <<
1016         int ret;  <<
1017         VMA_ITERATOR(vmi, current->mm, 0);  <<
1018  <<
1019         *populate = 0;  <<
1020                                                      296 
1021         /* decide whether we should attempt the mapping, and if so what sort of  !! 297         if (PAGE_ALIGN(len) == 0)
1022          * mapping */                           !! 298                 return addr;
1023         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,  <<
1024                                     &capabilities);  <<
1025         if (ret < 0)  <<
1026                 return ret;  <<
1027                                                      299 
1028         /* we ignore the address hint */        !! 300         if (len > TASK_SIZE)
1029         addr = 0;                               !! 301                 return -EINVAL;
1030         len = PAGE_ALIGN(len);  <<
1031  <<
1032         /* we've determined that we can make the mapping, now translate what we  <<
1033          * now know into VMA flags */  <<
1034         vm_flags |= determine_vm_flags(file, prot, flags, capabilities);  <<
1035  <<
1036  <<
1037         /* we're going to need to record the mapping */  <<
1038         region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);  <<
1039         if (!region)  <<
1040                 goto error_getting_region;  <<
1041  <<
1042         vma = vm_area_alloc(current->mm);  <<
1043         if (!vma)  <<
1044                 goto error_getting_vma;  <<
1045  <<
1046         region->vm_usage = 1;  <<
1047         region->vm_flags = vm_flags;  <<
1048         region->vm_pgoff = pgoff;  <<
1049  <<
1050         vm_flags_init(vma, vm_flags);  <<
1051         vma->vm_pgoff = pgoff;  <<
1052  <<
1053         if (file) {  <<
1054                 region->vm_file = get_file(file);  <<
1055                 vma->vm_file = get_file(file);  <<
1056         }  <<
1057                                                      302 
1058         down_write(&nommu_region_sem);          !! 303         /* offset overflow? */
                                                   >> 304         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                                                   >> 305                 return -EINVAL;
1059                                                  306 
1060         /* if we want to share, we need to check for regions created by other  !! 307         /* Do simple checking here so the lower-level routines won't have
1061          * mmap() calls that overlap with our proposed mapping  !! 308          * to. we assume access permissions have been handled by the open
1062          * - we can only share with a superset match on most regular files  !! 309          * of the memory object, so we don't do any here.
1063          * - shared mappings on character devices and memory backed files are  <<
1064          *   permitted to overlap inexactly as far as we are concerned for in  <<
1065          *   these cases, sharing is handled in the driver or filesystem rather  <<
1066          *   than here  <<
1067          */                                          310          */
1068         if (is_nommu_shared_mapping(vm_flags)) {  !! 311         vm_flags = calc_vm_flags(prot,flags) /* | mm->def_flags */ | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1069                 struct vm_region *pregion;  <<
1070                 unsigned long pglen, rpglen, pgend, rpgend, start;  <<
1071  <<
1072                 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;  <<
1073                 pgend = pgoff + pglen;  <<
1074  <<
1075                 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {  <<
1076                         pregion = rb_entry(rb, struct vm_region, vm_rb);  <<
1077  <<
1078                         if (!is_nommu_shared_mapping(pregion->vm_flags))  <<
1079                                 continue;  <<
1080  <<
1081                         /* search for overlapping mappings on the same file */  <<
1082                         if (file_inode(pregion->vm_file) !=  <<
1083                             file_inode(file))  <<
1084                                 continue;  <<
1085  <<
1086                         if (pregion->vm_pgoff >= pgend)  <<
1087                                 continue;  <<
1088  <<
1089                         rpglen = pregion->vm_end - pregion->vm_start;  <<
1090                         rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;  <<
1091                         rpgend = pregion->vm_pgoff + rpglen;  <<
1092                         if (pgoff >= rpgend)  <<
1093                                 continue;  <<
1094  <<
1095                         /* handle inexactly overlapping matches between  <<
1096                          * mappings */  <<
1097                         if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&  <<
1098                             !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {  <<
1099                                 /* new mapping is not a subset of the region */  <<
1100                                 if (!(capabilities & NOMMU_MAP_DIRECT))  <<
1101                                         goto sharing_violation;  <<
1102                                 continue;  <<
1103                         }  <<
1104                                                      312 
1105                         /* we've found a region we can share */  !! 313         /*
1106                         pregion->vm_usage++;    !! 314          * determine the object being mapped and call the appropriate
1107                         vma->vm_region = pregion;  !! 315          * specific mapper. 
1108                         start = pregion->vm_start;  <<
1109                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;  <<
1110                         vma->vm_start = start;  <<
1111                         vma->vm_end = start + len;  <<
1112  <<
1113                         if (pregion->vm_flags & VM_MAPPED_COPY)  <<
1114                                 vm_flags_set(vma, VM_MAPPED_COPY);  <<
1115                         else {  <<
1116                                 ret = do_mmap_shared_file(vma);  <<
1117                                 if (ret < 0) {  <<
1118                                         vma->vm_region = NULL;  <<
1119                                         vma->vm_start = 0;  <<
1120                                         vma->vm_end = 0;  <<
1121                                         pregion->vm_usage--;  <<
1122                                         pregion = NULL;  <<
1123                                         goto error_just_free;  <<
1124                                 }  <<
1125                         }  <<
1126                         fput(region->vm_file);  <<
1127                         kmem_cache_free(vm_region_jar, region);  <<
1128                         region = pregion;  <<
1129                         result = start;  <<
1130                         goto share;  <<
1131                 }  <<
1132  <<
1133                 /* obtain the address at which to make a shared mapping  <<
1134                  * - this is the hook for quasi-memory character devices to  <<
1135                  *   tell us the location of a shared mapping  <<
1136                  */  <<
1137                 if (capabilities & NOMMU_MAP_DIRECT) {  <<
1138                         addr = file->f_op->get_unmapped_area(file, addr, len,  <<
1139                                                              pgoff, flags);  <<
1140                         if (IS_ERR_VALUE(addr)) {  <<
1141                                 ret = addr;  <<
1142                                 if (ret != -ENOSYS)  <<
1143                                         goto error_just_free;  <<
1144  <<
1145                                 /* the driver refused to tell us where to site  <<
1146                                  * the mapping so we'll have to attempt to copy  <<
1147                                  * it */  <<
1148                                 ret = -ENODEV;  <<
1149                                 if (!(capabilities & NOMMU_MAP_COPY))  <<
1150                                         goto error_just_free;  <<
1151  <<
1152                                 capabilities &= ~NOMMU_MAP_DIRECT;  <<
1153                         } else {  <<
1154                                 vma->vm_start = region->vm_start = addr;  <<
1155                                 vma->vm_end = region->vm_end = addr + len;  <<
1156                         }  <<
1157                 }  <<
1158         }  <<
1159  <<
1160         vma->vm_region = region;  <<
1161  <<
1162         /* set up the mapping  <<
1163          * - the region is filled in if NOMMU_MAP_DIRECT is still set  <<
1164          */                                          316          */
1165         if (file && vma->vm_flags & VM_SHARED)  !! 317         if (file) {
1166                 ret = do_mmap_shared_file(vma);  !! 318                 struct vm_area_struct vma;
1167         else                                    !! 319                 int error;
1168                 ret = do_mmap_private(vma, region, len, capabilities);  <<
1169         if (ret < 0)  <<
1170                 goto error_just_free;  <<
1171         add_nommu_region(region);  <<
1172  <<
1173         /* clear anonymous mappings that don't ask for uninitialized data */  <<
1174         if (!vma->vm_file &&  <<
1175             (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||  <<
1176              !(flags & MAP_UNINITIALIZED)))  <<
1177                 memset((void *)region->vm_start, 0,  <<
1178                        region->vm_end - region->vm_start);  <<
1179  <<
1180         /* okay... we have a mapping; now we have to register it */  <<
1181         result = vma->vm_start;  <<
1182  <<
1183         current->mm->total_vm += len >> PAGE_SHIFT;  <<
1184  <<
1185 share:  <<
1186         BUG_ON(!vma->vm_region);  <<
1187         vma_iter_config(&vmi, vma->vm_start, vma->vm_end);  <<
1188         if (vma_iter_prealloc(&vmi, vma))  <<
1189                 goto error_just_free;  <<
1190  <<
1191         setup_vma_to_mm(vma, current->mm);  <<
1192         current->mm->map_count++;  <<
1193         /* add the VMA to the tree */  <<
1194         vma_iter_store(&vmi, vma);  <<
1195  <<
1196         /* we flush the region from the icache only when the first executable  <<
1197          * mapping of it is made  */  <<
1198         if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {  <<
1199                 flush_icache_user_range(region->vm_start, region->vm_end);  <<
1200                 region->vm_icache_flushed = true;  <<
1201         }  <<
1202  <<
1203         up_write(&nommu_region_sem);  <<
1204  <<
1205         return result;  <<
1206  <<
1207 error_just_free:  <<
1208         up_write(&nommu_region_sem);  <<
1209 error:  <<
1210         vma_iter_free(&vmi);  <<
1211         if (region->vm_file)  <<
1212                 fput(region->vm_file);  <<
1213         kmem_cache_free(vm_region_jar, region);  <<
1214         if (vma->vm_file)  <<
1215                 fput(vma->vm_file);  <<
1216         vm_area_free(vma);  <<
1217         return ret;  <<
1218  <<
1219 sharing_violation:  <<
1220         up_write(&nommu_region_sem);  <<
1221         pr_warn("Attempt to share mismatched mappings\n");  <<
1222         ret = -EINVAL;  <<
1223         goto error;  <<
1224  <<
1225 error_getting_vma:  <<
1226         kmem_cache_free(vm_region_jar, region);  <<
1227         pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",  <<
1228                         len, current->pid);  <<
1229         show_mem();  <<
1230         return -ENOMEM;  <<
1231  <<
1232 error_getting_region:  <<
1233         pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",  <<
1234                         len, current->pid);  <<
1235         show_mem();  <<
1236         return -ENOMEM;  <<
1237 }  <<
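
Seen from userspace, the region sharing above means two MAP_SHARED mappings of the same object and offset can come back at the same address: do_mmap() finds the existing region in nommu_region_tree and bumps vm_usage instead of creating a second copy. A sketch (/dev/mydev is a hypothetical device that supports direct mapping):

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int fd = open("/dev/mydev", O_RDWR);
        void *a = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        void *b = mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        /* same file, same pgoff, same length: one shared region */
        printf("a=%p b=%p (%s)\n", a, b, a == b ? "shared" : "distinct");
        return 0;
}
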
1238                                               << 
1239 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,  <<
1240                               unsigned long prot, unsigned long flags,  <<
1241                               unsigned long fd, unsigned long pgoff)  <<
1242 {  <<
1243         struct file *file = NULL;  <<
1244         unsigned long retval = -EBADF;  <<
1245  <<
1246         audit_mmap_fd(fd, flags);  <<
1247         if (!(flags & MAP_ANONYMOUS)) {  <<
1248                 file = fget(fd);  <<
1249                 if (!file)  <<
1250                         goto out;  <<
1251         }  <<
1252  <<
1253         retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);  <<
1254  <<
1255         if (file)  <<
1256                 fput(file);  <<
1257 out:  <<
1258         return retval;  <<
1259 }  <<
1260  <<
1261 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,  <<
1262                 unsigned long, prot, unsigned long, flags,  <<
1263                 unsigned long, fd, unsigned long, pgoff)  <<
1264 {  <<
1265         return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);  <<
1266 }  <<
1267  <<
1268 #ifdef __ARCH_WANT_SYS_OLD_MMAP  <<
1269 struct mmap_arg_struct {  <<
1270         unsigned long addr;  <<
1271         unsigned long len;  <<
1272         unsigned long prot;  <<
1273         unsigned long flags;  <<
1274         unsigned long fd;  <<
1275         unsigned long offset;  <<
1276 };  <<
1277  <<
1278 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)  <<
1279 {  <<
1280         struct mmap_arg_struct a;  <<
1281  <<
1282         if (copy_from_user(&a, arg, sizeof(a)))  <<
1283                 return -EFAULT;  <<
1284         if (offset_in_page(a.offset))  <<
1285                 return -EINVAL;  <<
1286                                                      320 
1287         return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,  !! 321                 if (!file->f_op)
1288                                a.offset >> PAGE_SHIFT);  !! 322                         return -ENODEV;
1289 }  <<
1290 #endif /* __ARCH_WANT_SYS_OLD_MMAP */  <<
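
old_mmap() takes a byte offset, whereas ksys_mmap_pgoff() wants it in pages: the offset must be page-aligned and is shifted down before the call. A stand-alone sketch of that conversion (4KiB pages assumed for the example):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

int main(void)
{
        unsigned long byte_off = 0x3000;        /* as passed to old_mmap() */

        if (offset_in_page(byte_off)) {
                printf("-EINVAL: offset not page aligned\n");
                return 1;
        }
        printf("pgoff = %#lx\n", byte_off >> PAGE_SHIFT);       /* 0x3 */
        return 0;
}
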
1291                                                  323 
1292 /*                                          !! 324                 vma.vm_start = addr;
1293  * split a vma into two pieces at address 'addr', a new vma is allocated either  !! 325                 vma.vm_end = addr + len;
1294  * for the first part or the tail.          !! 326                 vma.vm_flags = vm_flags;
1295  */                                         !! 327                 vma.vm_pgoff = pgoff;
1296 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,  !! 328 
1297                      unsigned long addr, int new_below)  !! 329 #ifdef MAGIC_ROM_PTR
1298 {                                           !! 330                 /* First, try simpler routine designed to give us a ROM pointer. */
1299         struct vm_area_struct *new;         !! 331 
1300         struct vm_region *region;           !! 332                 if (file->f_op->romptr && !(prot & PROT_WRITE)) {
1301         unsigned long npages;               !! 333                         error = file->f_op->romptr(file, &vma);
1302         struct mm_struct *mm;               !! 334 #ifdef DEBUG
1303                                             !! 335                         printk("romptr mmap returned %d, start 0x%.8x\n", error,
1304         /* we're only permitted to split anonymous regions (these should have  !! 336                                         vma.vm_start);
1305          * only a single usage on the region) */  !! 337 #endif
1306         if (vma->vm_file)                   !! 338                         if (!error)
                                                   >> 339                                 return vma.vm_start;
                                                   >> 340                         else if (error != -ENOSYS)
                                                   >> 341                                 return error;
                                                   >> 342                 } else
                                                   >> 343 #endif /* MAGIC_ROM_PTR */
                                                   >> 344                 /* Then try full mmap routine, which might return a RAM pointer,
                                                   >> 345                    or do something truly complicated. */
                                                   >> 346                    
                                                   >> 347                 if (file->f_op->mmap) {
                                                   >> 348                         error = file->f_op->mmap(file, &vma);
                                                   >> 349                                    
                                                   >> 350 #ifdef DEBUG
                                                   >> 351                         printk("f_op->mmap() returned %d/%lx\n", error, vma.vm_start);
                                                   >> 352 #endif
                                                   >> 353                         if (!error)
                                                   >> 354                                 return vma.vm_start;
                                                   >> 355                         else if (error != -ENOSYS)
                                                   >> 356                                 return error;
                                                   >> 357                 } else
                                                   >> 358                         return -ENODEV; /* No mapping operations defined */
                                                   >> 359 
                                                   >> 360                 /* An ENOSYS error indicates that mmap isn't possible (as opposed to
                                                   >> 361                    tried but failed) so we'll fall through to the copy. */
                                                   >> 362         }
                                                   >> 363 
                                                   >> 364         tblock = (struct mm_tblock_struct *)
                                                   >> 365                         kmalloc(sizeof(struct mm_tblock_struct), GFP_KERNEL);
                                                   >> 366         if (!tblock) {
                                                   >> 367                 printk("Allocation of tblock for %lu byte allocation from process %d failed\n", len, current->pid);
                                                   >> 368                 show_free_areas();
1307                 return -ENOMEM;                  369                 return -ENOMEM;
                                                   >> 370         }
1308                                                  371 
1309         mm = vma->vm_mm;                    !! 372         tblock->rblock = (struct mm_rblock_struct *)
1310         if (mm->map_count >= sysctl_max_map_count)  !! 373                         kmalloc(sizeof(struct mm_rblock_struct), GFP_KERNEL);
1311                 return -ENOMEM;  <<
1312                                                      374 
1313         region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);  !! 375         if (!tblock->rblock) {
1314         if (!region)                        !! 376                 printk("Allocation of rblock for %lu byte allocation from process %d failed\n", len, current->pid);
                                                   >> 377                 show_free_areas();
                                                   >> 378                 kfree(tblock);
1315                 return -ENOMEM;                  379                 return -ENOMEM;
1316                                               << 
1317         new = vm_area_dup(vma);               << 
1318         if (!new)                             << 
1319                 goto err_vma_dup;             << 
1320                                               << 
1321         /* most fields are the same, copy all, and then fixup */  <<
1322         *region = *vma->vm_region;  <<
1323         new->vm_region = region;  <<
1324  <<
1325         npages = (addr - vma->vm_start) >> PAGE_SHIFT;  <<
1326  <<
1327         if (new_below) {  <<
1328                 region->vm_top = region->vm_end = new->vm_end = addr;  <<
1329         } else {  <<
1330                 region->vm_start = new->vm_start = addr;  <<
1331                 region->vm_pgoff = new->vm_pgoff += npages;  <<
1332         }                                        380         }
1333                                                  381 
1334         vma_iter_config(vmi, new->vm_start, new->vm_end);  !! 382         result = kmalloc(len, GFP_KERNEL);
1335         if (vma_iter_prealloc(vmi, vma)) {  !! 383         if (!result) {
1336                 pr_warn("Allocation of vma tree for process %d failed\n",  !! 384                 printk("Allocation of length %lu from process %d failed\n", len,
1337                         current->pid);      !! 385                                 current->pid);
1338                 goto err_vmi_preallocate;   !! 386                 show_free_areas();
                                                   >> 387                 kfree(tblock->rblock);
                                                   >> 388                 kfree(tblock);
                                                   >> 389                 return -ENOMEM;
1339         }                                        390         }
1340                                                  391 
1341         if (new->vm_ops && new->vm_ops->open)  !! 392         tblock->rblock->refcount = 1;
1342                 new->vm_ops->open(new);     !! 393         tblock->rblock->kblock = result;
1343                                             !! 394         tblock->rblock->size = len;
1344         down_write(&nommu_region_sem);      !! 395         
1345         delete_nommu_region(vma->vm_region);  !! 396         realalloc += kobjsize(result);
1346         if (new_below) {                    !! 397         askedalloc += len;
1347                 vma->vm_region->vm_start = vma->vm_start = addr;  !! 398 
1348                 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;  !! 399 #ifdef WARN_ON_SLACK    
                                                   >> 400         if ((len+WARN_ON_SLACK) <= kobjsize(result))
                                                   >> 401                 printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n", len, current->pid, kobjsize(result)-len);
                                                   >> 402 #endif
                                                   >> 403         
                                                   >> 404         if (file) {
                                                   >> 405                 int error;
                                                   >> 406                 mm_segment_t old_fs = get_fs();
                                                   >> 407                 set_fs(KERNEL_DS);
                                                   >> 408                 error = file->f_op->read(file, (char *) result, len, &file->f_pos);
                                                   >> 409                 set_fs(old_fs);
                                                   >> 410                 if (error < 0) {
                                                   >> 411                         kfree(result);
                                                   >> 412                         kfree(tblock->rblock);
                                                   >> 413                         kfree(tblock);
                                                   >> 414                         return error;
                                                   >> 415                 }
                                                   >> 416                 if (error < len)
                                                   >> 417                         memset(result+error, '\0', len-error);
1349         } else {                                 418         } else {
1350                 vma->vm_region->vm_end = vma->vm_end = addr;  !! 419                 memset(result, '\0', len);
1351                 vma->vm_region->vm_top = addr;  <<
1352         }                                        420         }
1353         add_nommu_region(vma->vm_region);     << 
1354         add_nommu_region(new->vm_region);     << 
1355         up_write(&nommu_region_sem);          << 
1356                                               << 
1357         setup_vma_to_mm(vma, mm);             << 
1358         setup_vma_to_mm(new, mm);             << 
1359         vma_iter_store(vmi, new);             << 
1360         mm->map_count++;                      << 
1361         return 0;                             << 
1362                                                  421 
1363 err_vmi_preallocate:                          !! 422         realalloc += kobjsize(tblock);
1364         vm_area_free(new);                    !! 423         askedalloc += sizeof(struct mm_tblock_struct);
1365 err_vma_dup:                                  << 
1366         kmem_cache_free(vm_region_jar, region);  <<
1367         return -ENOMEM;                       << 
1368 }                                             << 
1369                                                  424 
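
The bookkeeping in split_vma() boils down to one page-offset shift: whichever piece now starts at addr has its backing offset advanced by npages. A stand-alone sketch of the arithmetic (the addresses and the 4KiB page size are assumptions chosen for the example):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long vm_start = 0x100000, vm_end = 0x108000;
        unsigned long vm_pgoff = 0x10;          /* backing offset in pages */
        unsigned long addr = 0x103000;          /* split point */
        unsigned long npages = (addr - vm_start) >> PAGE_SHIFT;  /* 3 */

        /* head keeps the old offset; the tail's offset moves up by npages */
        printf("head: %#lx-%#lx pgoff %#lx\n", vm_start, addr, vm_pgoff);
        printf("tail: %#lx-%#lx pgoff %#lx\n", addr, vm_end,
               vm_pgoff + npages);
        return 0;
}
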
1370 /*                                          !! 425         realalloc += kobjsize(tblock->rblock);
1371  * shrink a VMA by removing the specified chunk from either the beginning or  !! 426         askedalloc += sizeof(struct mm_rblock_struct);
1372  * the end  <<
1373  */  <<
1374 static int vmi_shrink_vma(struct vma_iterator *vmi,  <<
1375                       struct vm_area_struct *vma,  <<
1376                       unsigned long from, unsigned long to)  <<
1377 {  <<
1378         struct vm_region *region;  <<
1379  <<
1380         /* adjust the VMA's pointers, which may reposition it in the MM's tree  <<
1381          * and list */  <<
1382         if (from > vma->vm_start) {  <<
1383                 if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))  <<
1384                         return -ENOMEM;  <<
1385                 vma->vm_end = from;  <<
1386         } else {  <<
1387                 if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))  <<
1388                         return -ENOMEM;  <<
1389                 vma->vm_start = to;  <<
1390         }  <<
1391                                                      427 
1392         /* cut the backing region down to size */  !! 428         tblock->next = current->mm->context.tblock.next;
1393         region = vma->vm_region;            !! 429         current->mm->context.tblock.next = tblock;
1394         BUG_ON(region->vm_usage != 1);  <<
1395  <<
1396         down_write(&nommu_region_sem);  <<
1397         delete_nommu_region(region);  <<
1398         if (from > region->vm_start) {  <<
1399                 to = region->vm_top;  <<
1400                 region->vm_top = region->vm_end = from;  <<
1401         } else {  <<
1402                 region->vm_start = to;  <<
1403         }  <<
1404         add_nommu_region(region);  <<
1405         up_write(&nommu_region_sem);  <<
1406                                                      430 
1407         free_page_series(from, to);         !! 431 #ifdef DEBUG
1408         return 0;                           !! 432         printk("do_munmap:\n");
                                                   >> 433         show_process_blocks();
                                                   >> 434 #endif    
                                                   >> 435 
                                                   >> 436         return (unsigned long)result;
1409 }                                                437 }
1410                                                  438 
1411 /*                                          !! 439 int do_munmap(struct mm_struct * mm, unsigned long addr, size_t len)
1412  * release a mapping  <<
1413  * - under NOMMU conditions the chunk to be unmapped must be backed by a single  <<
1414  *   VMA, though it need not cover the whole VMA  <<
1415  */  <<
1416 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)  <<
1417 {                                                440 {
1418         VMA_ITERATOR(vmi, mm, start);         !! 441         struct mm_tblock_struct * tblock, *tmp;
1419         struct vm_area_struct *vma;           << 
1420         unsigned long end;                    << 
1421         int ret = 0;                          << 
1422                                                  442 
1423         len = PAGE_ALIGN(len);                !! 443 #ifdef MAGIC_ROM_PTR
1424         if (len == 0)                         !! 444         /*
1425                 return -EINVAL;               !! 445          * For efficiency's sake, if the pointer is obviously in ROM,
                                                   >> 446          * don't bother walking the lists to free it.
                                                   >> 447          */
                                                   >> 448         if (is_in_rom(addr))
                                                   >> 449                 return 0;
                                                   >> 450 #endif
1426                                                  451 
1427         end = start + len;                    !! 452 #ifdef DEBUG
                                                   >> 453         printk("do_munmap:\n");
                                                   >> 454 #endif
1428                                                  455 
1429         /* find the first potentially overlapping VMA */  !! 456         tmp = &mm->context.tblock; /* dummy head */
1430         vma = vma_find(&vmi, end);          !! 457         while ((tblock=tmp->next) && tblock->rblock &&
1431         if (!vma) {                         !! 458                         tblock->rblock->kblock != (void*)addr) 
1432                 static int limit;           !! 459                 tmp = tblock;
1433                 if (limit < 5) {            !! 460                 
1434                         pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",  !! 461         if (!tblock) {
1435                                         current->pid, current->comm,  !! 462                 printk("munmap of non-mmaped memory by process %d (%s): %p\n",
1436                                         start, start + len - 1);  !! 463                                 current->pid, current->comm, (void*)addr);
1437                         limit++;              << 
1438                 }                             << 
1439                 return -EINVAL;                  464                 return -EINVAL;
1440         }                                        465         }
1441                                               !! 466         if (tblock->rblock) {
1442         /* we're allowed to split an anonymous VMA but not a file-backed one */  !! 467                 if (!--tblock->rblock->refcount) {
1443         if (vma->vm_file) {                 !! 468                         if (tblock->rblock->kblock) {
1444                 do {                        !! 469                                 realalloc -= kobjsize(tblock->rblock->kblock);
1445                         if (start > vma->vm_start)  !! 470                                 askedalloc -= tblock->rblock->size;
1446                                 return -EINVAL;  !! 471                                 kfree(tblock->rblock->kblock);
1447                         if (end == vma->vm_end)  !! 472                         }
1448                                 goto erase_whole_vma;  !! 473                         
1449                         vma = vma_find(&vmi, end);  !! 474                         realalloc -= kobjsize(tblock->rblock);
1450                 } while (vma);              !! 475                         askedalloc -= sizeof(struct mm_rblock_struct);
1451                 return -EINVAL;             !! 476                         kfree(tblock->rblock);
1452         } else {  <<
1453                 /* the chunk must be a subset of the VMA found */  <<
1454                 if (start == vma->vm_start && end == vma->vm_end)  <<
1455                         goto erase_whole_vma;  <<
1456                 if (start < vma->vm_start || end > vma->vm_end)  <<
1457                         return -EINVAL;  <<
1458                 if (offset_in_page(start))  <<
1459                         return -EINVAL;  <<
1460                 if (end != vma->vm_end && offset_in_page(end))  <<
1461                         return -EINVAL;  <<
1462                 if (start != vma->vm_start && end != vma->vm_end) {  <<
1463                         ret = split_vma(&vmi, vma, start, 1);  <<
1464                         if (ret < 0)  <<
1465                                 return ret;  <<
1466                 }                                    477                 }
1467                 return vmi_shrink_vma(&vmi, vma, start, end);  <<
1468         }                                        478         }
                                                   >> 479         tmp->next = tblock->next;
                                                   >> 480         realalloc -= kobjsize(tblock);
                                                   >> 481         askedalloc -= sizeof(struct mm_tblock_struct);
                                                   >> 482         kfree(tblock);
                                                   >> 483 
                                                   >> 484 #ifdef DEBUG
                                                   >> 485         show_process_blocks();
                                                   >> 486 #endif    
1469                                                  487 
1470 erase_whole_vma:                              !! 488         return -EINVAL;
1471         if (delete_vma_from_mm(vma))          << 
1472                 ret = -ENOMEM;                << 
1473         else                                  << 
1474                 delete_vma(mm, vma);          << 
1475         return ret;                           << 
1476 }                                             << 
1477                                               << 
1478 int vm_munmap(unsigned long addr, size_t len) << 
1479 {                                             << 
1480         struct mm_struct *mm = current->mm;   << 
1481         int ret;                              << 
1482                                               << 
1483         mmap_write_lock(mm);                  << 
1484         ret = do_munmap(mm, addr, len, NULL); << 
1485         mmap_write_unlock(mm);                << 
1486         return ret;                           << 
1487 }                                             << 
1488 EXPORT_SYMBOL(vm_munmap);                     << 
1489                                               << 
1490 SYSCALL_DEFINE2(munmap, unsigned long, addr,  << 
1491 {                                             << 
1492         return vm_munmap(addr, len);          << 
1493 }                                                489 }
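
What do_munmap() permits on !MMU follows directly from the checks above: an anonymous mapping may be truncated, but the chunk must line up with a single VMA. A userspace sketch (illustrative only; the stray address is simply assumed to hit no other mapping):

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
        void *p = mmap(NULL, 4 * 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* shrink from the end: a subset of the single VMA, allowed */
        if (munmap((char *)p + 3 * 4096, 4096) == 0)
                printf("trimmed last page\n");

        /* a range no VMA covers: -EINVAL (plus a rate-limited
         * "munmap of memory not mmapped" warning in the kernel log) */
        if (munmap((char *)p + 16 * 4096, 4096) < 0)
                perror("stray munmap");
        return 0;
}
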
1494                                                  490 
1495 /*                                            !! 491 /* Release all mmaps. */
1496  * release all the mappings made in a process's VM space  !! 492 void exit_mmap(struct mm_struct * mm)
1497  */                                           << 
1498 void exit_mmap(struct mm_struct *mm)          << 
1499 {                                                493 {
1500         VMA_ITERATOR(vmi, mm, 0);             !! 494         struct mm_tblock_struct *tmp;
1501         struct vm_area_struct *vma;           << 
1502                                                  495 
1503         if (!mm)                                 496         if (!mm)
1504                 return;                          497                 return;
1505                                                  498 
1506         mm->total_vm = 0;                     !! 499 #ifdef DEBUG
                                                   >> 500         printk("Exit_mmap:\n");
                                                   >> 501 #endif
1507                                                  502 
1508         /*                                    !! 503         while((tmp = mm->context.tblock.next)) {
1509          * Lock the mm to avoid assert complaining even when this is the only  !! 504                 if (tmp->rblock) {
1510          * user of the mm                     !! 505                         if (!--tmp->rblock->refcount) {
1511          */                                   !! 506                                 if (tmp->rblock->kblock) {
1512         mmap_write_lock(mm);                  !! 507                                         realalloc -= kobjsize(tmp->rblock->kblock);
1513         for_each_vma(vmi, vma) {              !! 508                                         askedalloc -= tmp->rblock->size;
1514                 cleanup_vma_from_mm(vma);     !! 509                                         kfree(tmp->rblock->kblock);
1515                 delete_vma(mm, vma);          !! 510                                 }
1516                 cond_resched();               !! 511                                 realalloc -= kobjsize(tmp->rblock);
                                                   >> 512                                 askedalloc -= sizeof(struct mm_rblock_struct);
                                                   >> 513                                 kfree(tmp->rblock);
                                                   >> 514                         }
                                                   >> 515                         tmp->rblock = 0;
                                                   >> 516                 }
                                                   >> 517                 mm->context.tblock.next = tmp->next;
                                                   >> 518                 realalloc -= kobjsize(tmp);
                                                   >> 519                 askedalloc -= sizeof(struct mm_tblock_struct);
                                                   >> 520                 kfree(tmp);
1517         }                                        521         }
1518         __mt_destroy(&mm->mm_mt);             << 
1519         mmap_write_unlock(mm);                << 
1520 }                                             << 
1521                                               << 
1522 /*  <<
1523  * expand (or shrink) an existing mapping, potentially moving it at the same  <<
1524  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)  <<
1525  *  <<
1526  * under NOMMU conditions, we only permit changing a mapping's size, and only  <<
1527  * as long as it stays within the region allocated by do_mmap_private() and the  <<
1528  * block is not shareable  <<
1529  *  <<
1530  * MREMAP_FIXED is not supported under NOMMU conditions  <<
1531  */  <<
1532 static unsigned long do_mremap(unsigned long addr,  <<
1533                         unsigned long old_len, unsigned long new_len,  <<
1534                         unsigned long flags, unsigned long new_addr)  <<
1535 {  <<
1536         struct vm_area_struct *vma;  <<
1537  <<
1538         /* insanity checks first */  <<
1539         old_len = PAGE_ALIGN(old_len);  <<
1540         new_len = PAGE_ALIGN(new_len);  <<
1541         if (old_len == 0 || new_len == 0)  <<
1542                 return (unsigned long) -EINVAL;  <<
1543  <<
1544         if (offset_in_page(addr))  <<
1545                 return -EINVAL;  <<
1546  <<
1547         if (flags & MREMAP_FIXED && new_addr != addr)  <<
1548                 return (unsigned long) -EINVAL;  <<
1549  <<
1550         vma = find_vma_exact(current->mm, addr, old_len);  <<
1551         if (!vma)  <<
1552                 return (unsigned long) -EINVAL;  <<
1553  <<
1554         if (vma->vm_end != vma->vm_start + old_len)  <<
1555                 return (unsigned long) -EFAULT;  <<
1556  <<
1557         if (is_nommu_shared_mapping(vma->vm_flags))  <<
1558                 return (unsigned long) -EPERM;  <<
1559  <<
1560         if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)  <<
1561                 return (unsigned long) -ENOMEM;  <<
1562                                                      522 
1563         /* all checks complete - do it */       !! 523 #ifdef DEBUG
1564         vma->vm_end = vma->vm_start + new_len;  !! 524         show_process_blocks();
1565         return vma->vm_start;                   !! 525 #endif    
1566 }                                                    526 }
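
From userspace this means mremap() on !MMU never moves a mapping and can only grow into slack the backing region already holds (for example power-of-two slack left by do_mmap_private(), if vm.nr_trim_pages didn't trim it). A sketch, with both outcomes hedged in the comments:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
        /* 3 pages requested; the region may or may not hold 4 (order 2),
         * depending on the nr_trim_pages sysctl */
        void *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* growing into such slack can succeed, entirely in place ... */
        void *q = mremap(p, 3 * 4096, 4 * 4096, 0);
        if (q == MAP_FAILED)
                perror("grow into region slack");

        /* ... but growing past the region fails with ENOMEM, and
         * MREMAP_MAYMOVE cannot help: nothing is ever moved on !MMU */
        void *r = mremap(p, 3 * 4096, 64 * 4096, MREMAP_MAYMOVE);
        if (r == MAP_FAILED)
                perror("grow past region");
        return 0;
}
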
1567                                                  527 
1568 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,  !! 528 asmlinkage long sys_munmap(unsigned long addr, size_t len)
1569                 unsigned long, new_len, unsigned long, flags,  <<
1570                 unsigned long, new_addr)  <<
1571 {                                                529 {
1572         unsigned long ret;                    !! 530         int ret;
                                                   >> 531         struct mm_struct *mm = current->mm;
1573                                                  532 
1574         mmap_write_lock(current->mm);         !! 533         down_write(&mm->mmap_sem);
1575         ret = do_mremap(addr, old_len, new_len, flags, new_addr);  !! 534         ret = do_munmap(mm, addr, len);
1576         mmap_write_unlock(current->mm);       !! 535         up_write(&mm->mmap_sem);
1577         return ret;                              536         return ret;
1578 }                                                537 }
1579                                                  538 
1580 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,  !! 539 unsigned long do_brk(unsigned long addr, unsigned long len)
1581                 unsigned long pfn, unsigned long size, pgprot_t prot)  <<
1582 {                                             << 
1583         if (addr != (pfn << PAGE_SHIFT))      << 
1584                 return -EINVAL;               << 
1585                                               << 
1586         vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);  <<
1587         return 0;                             << 
1588 }                                             << 
1589 EXPORT_SYMBOL(remap_pfn_range);               << 
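Without an MMU there is no translation, so remap_pfn_range() above only agrees to identity mappings. A hypothetical framebuffer driver relies on that as follows (MYFB_PHYS_BASE and myfb_mmap are assumptions for the sketch):

#define MYFB_PHYS_BASE 0x30000000UL     /* assumption for the sketch */

static int myfb_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* succeeds only when vma->vm_start already equals the physical
         * address, i.e. addr == pfn << PAGE_SHIFT in the check above */
        return remap_pfn_range(vma, vma->vm_start,
                               MYFB_PHYS_BASE >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
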
1590                                               << 
1591 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)  <<
1592 {                                                    540 {
1593         unsigned long pfn = start >> PAGE_SHIFT;  !! 541         return -ENOMEM;
1594         unsigned long vm_len = vma->vm_end - vma->vm_start;  <<
1595  <<
1596         pfn += vma->vm_pgoff;  <<
1597         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);  <<
1598 }                                             << 
1599 EXPORT_SYMBOL(vm_iomap_memory);               << 
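/*
 * Editorial note, not part of nommu.c: vm_iomap_memory() is the simpler
 * front end when the whole of one contiguous physical window is mapped;
 * it derives the pfn and length from the vma itself.  MYDRV_BAR_* are
 * made-up values for illustration.
 */
#define MYDRV_BAR_PHYS  0x40000000UL    /* hypothetical device window */
#define MYDRV_BAR_LEN   0x1000UL

static int mydrv_mmap_bar(struct file *file, struct vm_area_struct *vma)
{
        return vm_iomap_memory(vma, MYDRV_BAR_PHYS, MYDRV_BAR_LEN);
}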

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        unsigned int size = vma->vm_end - vma->vm_start;

        if (!(vma->vm_flags & VM_USERMAP))
                return -EINVAL;

        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
        vma->vm_end = vma->vm_start + size;

        return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
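/*
 * Editorial note, not part of nommu.c: remap_vmalloc_range() is normally
 * paired with vmalloc_user(), which marks the allocation so the
 * VM_USERMAP check above passes.  A hedged sketch; mydrv_buf and the
 * single-page size are assumptions.
 */
#include <linux/vmalloc.h>

static void *mydrv_buf;         /* assumed: allocated at probe time */

static int mydrv_alloc(void)
{
        mydrv_buf = vmalloc_user(PAGE_SIZE);    /* zeroed, mmap-safe */
        return mydrv_buf ? 0 : -ENOMEM;
}

static int mydrv_mmap_buf(struct file *file, struct vm_area_struct *vma)
{
        /* on nommu this simply points the vma straight at the buffer */
        return remap_vmalloc_range(vma, mydrv_buf, vma->vm_pgoff);
}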

vm_fault_t filemap_fault(struct vm_fault *vmf)
{
        BUG();
        return 0;
}
EXPORT_SYMBOL(filemap_fault);

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                pgoff_t start_pgoff, pgoff_t end_pgoff)
{
        BUG();
        return 0;
}
EXPORT_SYMBOL(filemap_map_pages);

static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
                              void *buf, int len, unsigned int gup_flags)
{
        struct vm_area_struct *vma;
        int write = gup_flags & FOLL_WRITE;

        if (mmap_read_lock_killable(mm))
                return 0;

        /* the access must start within one of the target process's mappings */
        vma = find_vma(mm, addr);
        if (vma) {
                /* don't overrun this mapping */
                if (addr + len >= vma->vm_end)
                        len = vma->vm_end - addr;

                /* only read or write mappings where it is permitted */
                if (write && vma->vm_flags & VM_MAYWRITE)
                        copy_to_user_page(vma, NULL, addr,
                                         (void *) addr, buf, len);
                else if (!write && vma->vm_flags & VM_MAYREAD)
                        copy_from_user_page(vma, NULL, addr,
                                            buf, (void *) addr, len);
                else
                        len = 0;
        } else {
                len = 0;
        }

        mmap_read_unlock(mm);

        return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:         the mm_struct of the target address space
 * @addr:       start address to access
 * @buf:        source or destination buffer
 * @len:        number of bytes to transfer
 * @gup_flags:  flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
                void *buf, int len, unsigned int gup_flags)
{
        return __access_remote_vm(mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
                unsigned int gup_flags)
{
        struct mm_struct *mm;

        if (addr + len < addr)
                return 0;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        len = __access_remote_vm(mm, addr, buf, len, gup_flags);

        mmput(mm);
        return len;
}
EXPORT_SYMBOL_GPL(access_process_vm);
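/*
 * Editorial note, not part of nommu.c: a hedged sketch of a ptrace-style
 * consumer of access_process_vm().  peek_remote() is a made-up helper;
 * the call returns the number of bytes actually copied, which on nommu
 * is clamped to the single mapping containing @addr.
 */
static int peek_remote(struct task_struct *tsk, unsigned long addr,
                       void *buf, int len)
{
        int copied = access_process_vm(tsk, addr, buf, len, FOLL_FORCE);

        return copied == len ? 0 : -EIO;
}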

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond so that do_mmap() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                                size_t newsize)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        pgoff_t low, high;
        size_t r_size, r_top;

        low = newsize >> PAGE_SHIFT;
        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        down_write(&nommu_region_sem);
        i_mmap_lock_read(inode->i_mapping);

        /* search for VMAs that fall within the dead zone */
        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
                /* found one - only interested if it's shared out of the page
                 * cache */
                if (vma->vm_flags & VM_SHARED) {
                        i_mmap_unlock_read(inode->i_mapping);
                        up_write(&nommu_region_sem);
                        return -ETXTBSY; /* not quite true, but near enough */
                }
        }

        /* reduce any regions that overlap the dead zone - if in existence,
         * these will be pointed to by VMAs that don't overlap the dead zone
         *
         * we don't check for any regions that start beyond the EOF as there
         * shouldn't be any
         */
        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
                if (!(vma->vm_flags & VM_SHARED))
                        continue;

                region = vma->vm_region;
                r_size = region->vm_top - region->vm_start;
                r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

                if (r_top > newsize) {
                        region->vm_top -= r_top - newsize;
                        if (region->vm_end > region->vm_top)
                                region->vm_end = region->vm_top;
                }
        }

        i_mmap_unlock_read(inode->i_mapping);
        up_write(&nommu_region_sem);
        return 0;
}
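/*
 * Editorial note, not part of nommu.c: the in-tree caller of
 * nommu_shrink_inode_mappings() is the ramfs nommu resize path
 * (fs/ramfs/file-nommu.c).  A hedged sketch of that shape, with
 * my_fs_setsize() as a made-up filesystem helper:
 */
static int my_fs_setsize(struct inode *inode, loff_t newsize)
{
        int ret = 0;

        /* refuse to shrink beneath a shared mapping; trim vm_regions */
        if (newsize < inode->i_size)
                ret = nommu_shrink_inode_mappings(inode, inode->i_size,
                                                  newsize);
        if (ret < 0)
                return ret;

        truncate_setsize(inode, newsize);
        return 0;
}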

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
        unsigned long free_kbytes;

        free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
        return 0;
}
subsys_initcall(init_user_reserve);
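/*
 * Editorial note: free_kbytes / 32 is the "3%" above (3.125%, strictly).
 * Worked example on a small nommu board with 64 MiB free: 65536 kB / 32
 * = 2048 kB, well under the 128 MiB cap (1UL << 17 kB), so
 * sysctl_user_reserve_kbytes defaults to 2048.
 */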

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS mode. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
        unsigned long free_kbytes;

        free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
        return 0;
}
subsys_initcall(init_admin_reserve);
