arch/arm64/mm/mmap.c:

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_READONLY,
	[VM_WRITE | VM_READ]				= PAGE_READONLY,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few of such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}

static int __init adjust_protection_map(void)
{
	/*
	 * With Enhanced PAN we can honour the execute-only permissions as
	 * there is no PAN override with such mappings.
	 */
	if (cpus_have_cap(ARM64_HAS_EPAN)) {
		protection_map[VM_EXEC] = PAGE_EXECONLY;
		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
	}

	if (lpa2_is_enabled())
		for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
			pgprot_val(protection_map[i]) &= ~PTE_SHARED;

	return 0;
}
arch_initcall(adjust_protection_map);

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = pgprot_val(protection_map[vm_flags &
				   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

#ifdef CONFIG_ARCH_HAS_PKEYS
	if (system_supports_poe()) {
		if (vm_flags & VM_PKEY_BIT0)
			prot |= PTE_PO_IDX_0;
		if (vm_flags & VM_PKEY_BIT1)
			prot |= PTE_PO_IDX_1;
		if (vm_flags & VM_PKEY_BIT2)
			prot |= PTE_PO_IDX_2;
	}
#endif

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);
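vm_get_page_prot() above works by indexing a 16-entry table with the low
VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of the vma flags and then OR-ing in
architecture-specific bits (BTI, MTE, POE). A minimal userspace sketch of that
table-lookup idea follows; the bit values match the kernel's VM_READ (0x1),
VM_WRITE (0x2), VM_EXEC (0x4) and VM_SHARED (0x8), but the prot_name[] strings
and the main() driver are illustrative assumptions, not kernel code.

/* Sketch of the protection_map lookup scheme (userspace, illustrative only). */
#include <stdio.h>

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

/* Stand-in for protection_map[16]: one entry per combination of the four
 * low vm_flags bits; private writable mappings resolve to read-only page
 * protections so that writes fault and trigger copy-on-write. */
static const char *prot_name[16] = {
	[0]                                        = "PAGE_NONE",
	[VM_READ]                                  = "PAGE_READONLY",
	[VM_WRITE]                                 = "PAGE_READONLY (COW)",
	[VM_WRITE | VM_READ]                       = "PAGE_READONLY (COW)",
	[VM_EXEC]                                  = "PAGE_READONLY_EXEC",
	[VM_EXEC | VM_READ]                        = "PAGE_READONLY_EXEC",
	[VM_EXEC | VM_WRITE]                       = "PAGE_READONLY_EXEC (COW)",
	[VM_EXEC | VM_WRITE | VM_READ]             = "PAGE_READONLY_EXEC (COW)",
	[VM_SHARED]                                = "PAGE_NONE",
	[VM_SHARED | VM_READ]                      = "PAGE_READONLY",
	[VM_SHARED | VM_WRITE]                     = "PAGE_SHARED",
	[VM_SHARED | VM_WRITE | VM_READ]           = "PAGE_SHARED",
	[VM_SHARED | VM_EXEC]                      = "PAGE_READONLY_EXEC",
	[VM_SHARED | VM_EXEC | VM_READ]            = "PAGE_READONLY_EXEC",
	[VM_SHARED | VM_EXEC | VM_WRITE]           = "PAGE_SHARED_EXEC",
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = "PAGE_SHARED_EXEC",
};

static const char *lookup(unsigned long vm_flags)
{
	/* Same masking step as vm_get_page_prot(): only the four
	 * permission/sharing bits select the base protection. */
	return prot_name[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	printf("private rw-: %s\n", lookup(VM_READ | VM_WRITE));
	printf("shared  rw-: %s\n", lookup(VM_SHARED | VM_READ | VM_WRITE));
	printf("private --x: %s\n", lookup(VM_EXEC));
	return 0;
}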
arch/mips/mm/mmap.c:

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info = {};

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags,
	vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, vm_flags_t vm_flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

bool __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
		return false;

	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
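On MIPS, shm_align_mask describes the virtual-aliasing window of the data
cache: shared mappings of the same file offset must land on the same cache
colour, which is what COLOUR_ALIGN() and the MAP_FIXED check above enforce.
Below is a minimal sketch of that arithmetic, assuming 4 KiB pages and a
hypothetical 32 KiB aliasing window (mask 0x7fff); it is plain userspace C
for illustration, and the real mask depends on the CPU's cache geometry.

/* Sketch of the cache-colour arithmetic used above (userspace, illustrative). */
#include <stdio.h>

#define PAGE_SHIFT	12
static unsigned long shm_align_mask = 0x7fffUL;	/* assumed, not probed */

/* Same expression as the kernel's COLOUR_ALIGN(): round addr up to the
 * aliasing boundary, then add the colour implied by the file offset. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return (((addr + shm_align_mask) & ~shm_align_mask) +
		((pgoff << PAGE_SHIFT) & shm_align_mask));
}

/* The MAP_FIXED test from arch_get_unmapped_area_common(): a shared fixed
 * mapping is rejected unless addr and pgoff agree on the cache colour. */
static int colour_violation(unsigned long addr, unsigned long pgoff)
{
	return ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask) != 0;
}

int main(void)
{
	unsigned long a = colour_align(0x10001000UL, 3);	/* hint 1, pgoff 3 */
	unsigned long b = colour_align(0x20005000UL, 3);	/* hint 2, pgoff 3 */

	/* Both results share the low bits inside the aliasing window, so the
	 * two mappings of page 3 cannot alias to different cache colours. */
	printf("a=%#lx b=%#lx same colour: %d\n",
	       a, b, (a & shm_align_mask) == (b & shm_align_mask));

	printf("MAP_FIXED at %#lx, pgoff 3: %s\n", 0x10002000UL,
	       colour_violation(0x10002000UL, 3) ? "rejected (-EINVAL)" : "ok");
	return 0;
}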