// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/page_table_check.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
			IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};
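/*
 * Note on how the static key above is consumed: callers do not invoke the
 * __page_table_check_*() functions in this file directly. A minimal sketch
 * of the inline-wrapper pattern used by <linux/page_table_check.h> (the
 * header itself is authoritative, this is only an illustration):
 *
 *	static inline void page_table_check_pte_clear(struct mm_struct *mm,
 *						      pte_t pte)
 *	{
 *		if (static_branch_likely(&page_table_check_disabled))
 *			return;
 *
 *		__page_table_check_pte_clear(mm, pte);
 *	}
 *
 * With the feature disabled (the default state of the static key), each
 * hook collapses to a single patched-out branch on the fast path.
 */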
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}

/*
 * An entry is removed from the page table; decrement the counters for that
 * page and verify that it is of the correct type and that the counters do
 * not become negative.
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table; increment the counters for that
 * page and verify that it is of the correct type and is not being mapped
 * with a different type by a different process.
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * The page is on the free list, or is being allocated; verify that the
 * counters are zeroes and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);

	if (!page_ext)
		return;

	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

/* Whether the swap entry cached writable information */
static inline bool swap_cached_writable(swp_entry_t entry)
{
	return is_writable_device_exclusive_entry(entry) ||
	       is_writable_device_private_entry(entry) ||
	       is_writable_migration_entry(entry);
}

static inline void page_table_check_pte_flags(pte_t pte)
{
	if (pte_present(pte) && pte_uffd_wp(pte))
		WARN_ON_ONCE(pte_write(pte));
	else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
		WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
}

void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
				 unsigned int nr)
{
	unsigned int i;

	if (&init_mm == mm)
		return;

	page_table_check_pte_flags(pte);

	for (i = 0; i < nr; i++)
		__page_table_check_pte_clear(mm, ptep_get(ptep + i));
	if (pte_user_accessible_page(pte))
		page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
}
EXPORT_SYMBOL(__page_table_check_ptes_set);
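/*
 * For illustration only: roughly how an architecture wires the set-side
 * hook into its pte installation path. This is a sketch, not a copy of any
 * particular architecture's code; the names __set_ptes() and the exact call
 * site are assumptions here, see the arch's <asm/pgtable.h> for the real
 * wiring:
 *
 *	static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
 *				    pte_t *ptep, pte_t pte, unsigned int nr)
 *	{
 *		page_table_check_ptes_set(mm, ptep, pte, nr);
 *		__set_ptes(mm, addr, ptep, pte, nr);
 *	}
 */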
static inline void page_table_check_pmd_flags(pmd_t pmd)
{
	if (pmd_present(pmd) && pmd_uffd_wp(pmd))
		WARN_ON_ONCE(pmd_write(pmd));
	else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
		WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
}

void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	page_table_check_pmd_flags(pmd);

	__page_table_check_pmd_clear(mm, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

/*
 * The pte page table under @pmd is about to be torn down; run the clear
 * check on every pte entry it still contains.
 */
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		if (WARN_ON(!ptep))
			return;
		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}
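/*
 * Usage note (see Documentation/mm/page_table_check.rst): build with
 * CONFIG_PAGE_TABLE_CHECK=y and boot with "page_table_check=on" to enable
 * the checks on a supported architecture, or set
 * CONFIG_PAGE_TABLE_CHECK_ENFORCED=y to have them enabled without the
 * kernel command line parameter.
 */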