
TOMOYO Linux Cross Reference
Linux/arch/arm/mm/mmap.c


// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
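
/*
 * A worked example of COLOUR_ALIGN() (illustrative; on ARM, SHMLBA is
 * 4 * PAGE_SIZE, i.e. 0x4000 with 4KiB pages):
 *
 *	addr = 0x40001234, pgoff = 3, PAGE_SHIFT = 12:
 *	(0x40001234 + 0x3fff) & ~0x3fff	= 0x40004000  round up to SHMLBA
 *	(3 << 12) & 0x3fff		= 0x00003000  colour of page pgoff
 *	COLOUR_ALIGN(addr, pgoff)	= 0x40007000
 *
 * The result is an address at or above addr at which page pgoff of
 * the object keeps its cache colour (its offset modulo SHMLBA).
 */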

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a given page of an object
 * must always be mapped at the same offset modulo SHMLBA bytes, so
 * that it lands on the same cache colour in every mapping.
 *
 * We provide this function unconditionally; in the VIVT case the
 * alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info = {};

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * A MAP_FIXED mapping cannot be moved, so all we can do is
	 * verify that a shared mapping at the requested address would
	 * be correctly coloured, and reject it otherwise.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
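
/*
 * Illustrative note (assuming the generic vm_unmapped_area()
 * semantics, nothing ARM-specific): any address returned for the
 * request built above satisfies
 *
 *	(addr & info.align_mask) == (info.align_offset & info.align_mask)
 *
 * With 4KiB pages and SHMLBA == 0x4000, align_mask is 0x3000, so the
 * two colour bits (bits 12-13) of the returned address match those of
 * pgoff << PAGE_SHIFT — the COLOUR_ALIGN() property expressed as a
 * search constraint.
 */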

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info = {};

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
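
/*
 * Worked arithmetic for the failure test above (illustrative):
 * vm_unmapped_area() returns either a page-aligned address or a
 * negative errno, and no negative errno is page aligned, so
 * `addr & ~PAGE_MASK` is non-zero exactly in the error case.  On
 * 32-bit ARM with 4KiB pages:
 *
 *	-ENOMEM			= 0xfffffff4
 *	~PAGE_MASK		= 0x00000fff
 *	0xfffffff4 & 0xfff	= 0xff4  (non-zero: take the fallback)
 */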

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
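
/*
 * Worked example for valid_phys_addr_range() (hypothetical platform
 * values, not from this file): with PHYS_OFFSET == 0x80000000 and
 * 512MiB of lowmem, so that __pa(high_memory - 1) + 1 == 0xa0000000,
 * /dev/mem reads and writes are confined to [0x80000000, 0xa0000000):
 *
 *	valid_phys_addr_range(0x7ff00000, 0x1000) = 0  below RAM
 *	valid_phys_addr_range(0x9ffff000, 0x1000) = 1  last lowmem page
 *	valid_phys_addr_range(0x9ffff000, 0x2000) = 0  runs past lowmem
 */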

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
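
/*
 * Worked example for valid_mmap_phys_addr_range() (assuming a
 * non-LPAE configuration, where PHYS_MASK covers the full 32-bit
 * physical address space): PHYS_MASK >> PAGE_SHIFT == 0xfffff, so a
 * mapping must end at or below pfn 0x100000:
 *
 *	pfn = 0xfffff,  size = 0x1000:  0xfffff + 1 <= 0x100000  valid
 *	pfn = 0x100000, size = 0x1000:  0x100000 + 1 > 0x100000  rejected
 */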
