TOMOYO Linux Cross Reference
Linux/arch/loongarch/mm/mmap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kfence.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>

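/*
 * SHMLBA-sized alignment avoids cache aliasing: shared mappings of the
 * same file offset are placed so that their virtual addresses agree
 * modulo SHMLBA, keeping them on the same cache colour (the same index
 * bits of a virtually indexed cache) in every address space.
 */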
#define SHM_ALIGN_MASK  (SHMLBA - 1)

#define COLOUR_ALIGN(addr, pgoff)                       \
        ((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK)  \
         + (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))
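
/*
 * Worked example (illustrative values only, assuming SHMLBA == 64 KiB
 * and 4 KiB pages): COLOUR_ALIGN(0x120003000, 2) first rounds the
 * address up to 0x120010000, then adds the file offset's position
 * within the 64 KiB window ((2 << 12) & 0xffff == 0x2000), giving
 * 0x120012000. Address and file offset now agree modulo SHMLBA, so
 * they index the same cache lines.
 */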

enum mmap_allocation_direction {UP, DOWN};

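/*
 * Common helper for the bottom-up and top-down variants below: honour
 * MAP_FIXED, try the caller's address hint, and otherwise ask
 * vm_unmapped_area() for a suitably aligned free range in the
 * requested direction.
 */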
static unsigned long arch_get_unmapped_area_common(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags, enum mmap_allocation_direction dir)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;
        struct vm_unmapped_area_info info = {};

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

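        /*
         * vm_unmapped_area() returns an address whose bits under
         * align_mask match align_offset, so masking with PAGE_MASK &
         * SHM_ALIGN_MASK constrains only the colour bits above page
         * granularity: the chosen address gets the same colour as the
         * file offset.
         */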
        info.length = len;
        info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;

        if (dir == DOWN) {
                info.flags = VM_UNMAPPED_AREA_TOPDOWN;
                info.low_limit = PAGE_SIZE;
                info.high_limit = mm->mmap_base;
                addr = vm_unmapped_area(&info);

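                /*
                 * On success the result is page aligned; on failure it
                 * is a small negative errno whose low bits are set, so
                 * a clear page offset distinguishes the two.
                 */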
                if (!(addr & ~PAGE_MASK))
                        return addr;

                /*
                 * A failed mmap() very likely causes application failure,
                 * so fall back to the bottom-up function here. This scenario
                 * can happen with large stack limits and large mmap()
                 * allocations.
                 */
        }

        info.flags = 0;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, DOWN);
}

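/*
 * A kernel virtual address is valid if it is a KFENCE object (the
 * KFENCE pool lies outside the linear map) or if it falls inside the
 * direct-mapped range [PAGE_OFFSET, vm_map_base) and its backing page
 * frame actually exists.
 */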
int __virt_addr_valid(volatile void *kaddr)
{
        unsigned long vaddr = (unsigned long)kaddr;

        if (is_kfence_address((void *)kaddr))
                return 1;

        if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
                return 0;

        return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
        /*
         * Check whether addr is covered by a memory region without the
         * MEMBLOCK_NOMAP attribute, and whether that region covers the
         * entire range. In theory, this could lead to false negatives
         * if the range is covered by distinct but adjacent memory regions
         * that only differ in other attributes. However, few such
         * attributes have been defined, and it is debatable whether it
         * follows that /dev/mem read() calls should be able to traverse
         * such boundaries.
         */
        return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
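        /*
         * GENMASK_ULL(cpu_pabits, 0) is the mask of all supported
         * physical address bits (cpu_pabits being the highest valid bit
         * index), so the test fails if the end of the mapping would
         * exceed the supported physical range.
         */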
        return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}
