// SPDX-License-Identifier: GPL-2.0

/*
 * MMU-generic set_memory implementation for powerpc
 *
 * Copyright 2019-2021, IBM Corporation.
 */

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <mm/mmu_decl.h>

static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
				    unsigned long old, unsigned long new)
{
	return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0);
}

/*
 * Updates the attributes of a page atomically.
 *
 * This sequence is safe against concurrent updates, and also allows updating the
 * attributes of a page currently being executed or accessed.
 */
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	long action = (long)data;

	addr &= PAGE_MASK;
	/* modify the PTE bits as desired */
	switch (action) {
	case SET_MEMORY_RO:
		/* Don't clear DIRTY bit */
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
		break;
	case SET_MEMORY_ROX:
		/* Don't clear DIRTY bit */
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_ROX);
		break;
	case SET_MEMORY_RW:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
		break;
	case SET_MEMORY_NX:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_ROX, _PAGE_KERNEL_RO);
		break;
	case SET_MEMORY_X:
		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX);
		break;
	case SET_MEMORY_NP:
		pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
		break;
	case SET_MEMORY_P:
		pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

int change_memory_attr(unsigned long addr, int numpages, long action)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;

	if (!numpages)
		return 0;

	if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
			 is_vm_area_hugepages((void *)addr)))
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * On hash, the linear mapping is not in the Linux page table so
	 * apply_to_existing_page_range() will have no effect. If in the future
	 * the set_memory_* functions are used on the linear map this will need
	 * to be updated.
	 */
	if (!radix_enabled()) {
		int region = get_region_id(addr);

		if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
			return -EINVAL;
	}
#endif

	return apply_to_existing_page_range(&init_mm, start, size,
					    change_page_attr, (void *)action);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	int err;
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
		err = hash__kernel_map_pages(page, numpages, enable);
	else if (enable)
		err = set_memory_p(addr, numpages);
	else
		err = set_memory_np(addr, numpages);

	if (err)
		panic("%s: changing memory protections failed\n", __func__);
}
#endif
#endif
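
/*
 * Example usage (illustrative sketch, not part of this file): callers
 * normally reach change_memory_attr() through the set_memory_*() wrappers
 * declared in asm/set_memory.h, each of which passes the matching
 * SET_MEMORY_* action for a page-aligned kernel virtual range, e.g.:
 *
 *	// Make one page of a vmalloc'ed buffer read-only, then
 *	// restore write access.
 *	unsigned long addr = (unsigned long)vmalloc(PAGE_SIZE);
 *
 *	set_memory_ro(addr, 1);	// change_memory_attr(addr, 1, SET_MEMORY_RO)
 *	set_memory_rw(addr, 1);	// change_memory_attr(addr, 1, SET_MEMORY_RW)
 *
 * The wrappers return 0 on success or a negative errno, so callers are
 * expected to check the result before relying on the new protections.
 */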