/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
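/*
 * Worked example (illustrative only, not from the original source): assume
 * 4 KiB pages and a 16 KiB aliasing window, so shm_align_mask == 0x3fff.
 * For addr == 0x10001000 and pgoff == 3:
 *
 *	(0x10001000 + 0x3fff) & ~0x3fff == 0x10004000	(round up)
 *	(3 << PAGE_SHIFT) & 0x3fff      == 0x3000	(colour of the offset)
 *	COLOUR_ALIGN(0x10001000, 3)     == 0x10007000
 *
 * 0x10007000 & 0x3fff == 0x3000, i.e. the virtual address and the file
 * offset index the same lines of a virtually indexed cache, so shared
 * mappings of the same page do not alias to different cache lines.
 */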
enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info = {};

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags,
	vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

bool __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
		return false;

	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
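/*
 * Minimal user-space sketch (illustrative only; the descriptor fd, the
 * 4096-byte length and the 0x3fff mask are assumptions for a 16 KiB
 * aliasing window, not part of this file): two MAP_SHARED mappings of the
 * same file offset should come back with the same cache colour, because
 * both requests reach vm_unmapped_area() with identical align_offset.
 *
 *	void *a = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	void *b = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	assert(((unsigned long)a & 0x3fff) == ((unsigned long)b & 0x3fff));
 *
 * On "sane" caches shm_align_mask is PAGE_SIZE - 1, do_color_align adds no
 * constraint beyond page alignment, and the check above holds trivially.
 */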