TOMOYO Linux Cross Reference
Linux/arch/riscv/mm/pageattr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

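/*
 * Private data for the page walk below: protection bits to set and to
 * clear in every page-table entry that is visited.
 */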
struct pageattr_masks {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

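/* Apply the clear mask, then the set mask, to a raw page-table entry value. */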
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
        struct pageattr_masks *masks = walk->private;
        unsigned long new_val = val;

        new_val &= ~(pgprot_val(masks->clear_mask));
        new_val |= (pgprot_val(masks->set_mask));

        return new_val;
}

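/*
 * Per-level walk callbacks: each callback rewrites the entry in place if
 * it is a leaf at that level; non-leaf entries are left alone and the
 * generic walker descends into them.
 */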
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        p4d_t val = p4dp_get(p4d);

        if (p4d_leaf(val)) {
                val = __p4d(set_pageattr_masks(p4d_val(val), walk));
                set_p4d(p4d, val);
        }

        return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pud_t val = pudp_get(pud);

        if (pud_leaf(val)) {
                val = __pud(set_pageattr_masks(pud_val(val), walk));
                set_pud(pud, val);
        }

        return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pmd_t val = pmdp_get(pmd);

        if (pmd_leaf(val)) {
                val = __pmd(set_pageattr_masks(pmd_val(val), walk));
                set_pmd(pmd, val);
        }

        return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pte_t val = ptep_get(pte);

        val = __pte(set_pageattr_masks(pte_val(val), walk));
        set_pte(pte, val);

        return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
                             int depth, struct mm_walk *walk)
{
        /* Nothing to do here */
        return 0;
}

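/*
 * Walk operations shared by all __set_memory() walks; the walks are
 * serialized by taking init_mm's mmap lock for writing in __set_memory().
 */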
static const struct mm_walk_ops pageattr_ops = {
        .p4d_entry = pageattr_p4d_entry,
        .pud_entry = pageattr_pud_entry,
        .pmd_entry = pageattr_pmd_entry,
        .pte_entry = pageattr_pte_entry,
        .pte_hole = pageattr_pte_hole,
        .walk_lock = PGWALK_RDLOCK,
};

#ifdef CONFIG_64BIT
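/*
 * The linear mapping may use huge leaf entries at the PMD, PUD and P4D
 * levels.  Before changing the protections of a sub-range of such a huge
 * mapping, the helpers below split the leaf entry into a table of
 * next-level entries carrying the original protections, so that only the
 * requested range is affected.
 */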
static int __split_linear_mapping_pmd(pud_t *pudp,
                                      unsigned long vaddr, unsigned long end)
{
        pmd_t *pmdp;
        unsigned long next;

        pmdp = pmd_offset(pudp, vaddr);

        do {
                next = pmd_addr_end(vaddr, end);

                /*
                 * If [vaddr; end] contains [vaddr & PMD_MASK; next], we don't
                 * need to split, we'll change the protections on the whole PMD.
                 */
                if (next - vaddr >= PMD_SIZE &&
                    vaddr <= (vaddr & PMD_MASK) && end >= next)
                        continue;

                if (pmd_leaf(pmdp_get(pmdp))) {
                        struct page *pte_page;
                        unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
                        pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
                        pte_t *ptep_new;
                        int i;

                        pte_page = alloc_page(GFP_KERNEL);
                        if (!pte_page)
                                return -ENOMEM;

                        /*
                         * Fill the pte level with leaf ptes that have the same
                         * protections as the leaf pmd.
                         */
                        ptep_new = (pte_t *)page_address(pte_page);
                        for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
                                set_pte(ptep_new, pfn_pte(pfn + i, prot));

                        /*
                         * Make sure the pte filling is not reordered with the
                         * pmd store which could result in seeing a partially
                         * filled pte level.
                         */
                        smp_wmb();

                        set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
                }
        } while (pmdp++, vaddr = next, vaddr != end);

        return 0;
}

static int __split_linear_mapping_pud(p4d_t *p4dp,
                                      unsigned long vaddr, unsigned long end)
{
        pud_t *pudp;
        unsigned long next;
        int ret;

        pudp = pud_offset(p4dp, vaddr);

        do {
                next = pud_addr_end(vaddr, end);

                /*
                 * If [vaddr; end] contains [vaddr & PUD_MASK; next], we don't
                 * need to split, we'll change the protections on the whole PUD.
                 */
                if (next - vaddr >= PUD_SIZE &&
                    vaddr <= (vaddr & PUD_MASK) && end >= next)
                        continue;

                if (pud_leaf(pudp_get(pudp))) {
                        struct page *pmd_page;
                        unsigned long pfn = _pud_pfn(pudp_get(pudp));
                        pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
                        pmd_t *pmdp_new;
                        int i;

                        pmd_page = alloc_page(GFP_KERNEL);
                        if (!pmd_page)
                                return -ENOMEM;

                        /*
                         * Fill the pmd level with leaf pmds that have the same
                         * protections as the leaf pud.
                         */
                        pmdp_new = (pmd_t *)page_address(pmd_page);
                        for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
                                set_pmd(pmdp_new,
                                        pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));

                        /*
                         * Make sure the pmd filling is not reordered with the
                         * pud store which could result in seeing a partially
                         * filled pmd level.
                         */
                        smp_wmb();

                        set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
                }

                ret = __split_linear_mapping_pmd(pudp, vaddr, next);
                if (ret)
                        return ret;
        } while (pudp++, vaddr = next, vaddr != end);

        return 0;
}

static int __split_linear_mapping_p4d(pgd_t *pgdp,
                                      unsigned long vaddr, unsigned long end)
{
        p4d_t *p4dp;
        unsigned long next;
        int ret;

        p4dp = p4d_offset(pgdp, vaddr);

        do {
                next = p4d_addr_end(vaddr, end);

                /*
                 * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
                 * need to split, we'll change the protections on the whole P4D.
                 */
                if (next - vaddr >= P4D_SIZE &&
                    vaddr <= (vaddr & P4D_MASK) && end >= next)
                        continue;

                if (p4d_leaf(p4dp_get(p4dp))) {
                        struct page *pud_page;
                        unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
                        pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
                        pud_t *pudp_new;
                        int i;

                        pud_page = alloc_page(GFP_KERNEL);
                        if (!pud_page)
                                return -ENOMEM;

                        /*
                         * Fill the pud level with leaf puds that have the same
                         * protections as the leaf p4d.
                         */
                        pudp_new = (pud_t *)page_address(pud_page);
                        for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
                                set_pud(pudp_new,
                                        pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));

                        /*
                         * Make sure the pud filling is not reordered with the
                         * p4d store which could result in seeing a partially
                         * filled pud level.
                         */
                        smp_wmb();

                        set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
                }

                ret = __split_linear_mapping_pud(p4dp, vaddr, next);
                if (ret)
                        return ret;
        } while (p4dp++, vaddr = next, vaddr != end);

        return 0;
}

static int __split_linear_mapping_pgd(pgd_t *pgdp,
                                      unsigned long vaddr,
                                      unsigned long end)
{
        unsigned long next;
        int ret;

        do {
                next = pgd_addr_end(vaddr, end);
                /* We never use PGD mappings for the linear mapping */
                ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
                if (ret)
                        return ret;
        } while (pgdp++, vaddr = next, vaddr != end);

        return 0;
}

static int split_linear_mapping(unsigned long start, unsigned long end)
{
        return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
}
#endif  /* CONFIG_64BIT */

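/*
 * Set the bits in @set_mask and clear the bits in @clear_mask on
 * @numpages pages starting at @addr.  On 64-bit kernels, the same change
 * is applied to any alias of the range in the linear mapping, which may
 * first require splitting huge mappings.
 */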
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
                        pgprot_t clear_mask)
{
        int ret;
        unsigned long start = addr;
        unsigned long end = start + PAGE_SIZE * numpages;
        unsigned long __maybe_unused lm_start;
        unsigned long __maybe_unused lm_end;
        struct pageattr_masks masks = {
                .set_mask = set_mask,
                .clear_mask = clear_mask
        };

        if (!numpages)
                return 0;

        mmap_write_lock(&init_mm);

#ifdef CONFIG_64BIT
        /*
         * We are about to change the permissions of a kernel mapping, so we
         * must apply the same changes to its linear mapping alias, which may
         * imply splitting a huge mapping.
         */

        if (is_vmalloc_or_module_addr((void *)start)) {
                struct vm_struct *area = NULL;
                int i, page_start;

                area = find_vm_area((void *)start);
                page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;

                /* Update the linear-map alias of each page backing the area. */
                for (i = page_start; i < page_start + numpages; ++i) {
                        lm_start = (unsigned long)page_address(area->pages[i]);
                        lm_end = lm_start + PAGE_SIZE;

                        ret = split_linear_mapping(lm_start, lm_end);
                        if (ret)
                                goto unlock;

                        ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
                                                    &pageattr_ops, NULL, &masks);
                        if (ret)
                                goto unlock;
                }
        } else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
                if (is_kernel_mapping(start)) {
                        lm_start = (unsigned long)lm_alias(start);
                        lm_end = (unsigned long)lm_alias(end);
                } else {
                        lm_start = start;
                        lm_end = end;
                }

                ret = split_linear_mapping(lm_start, lm_end);
                if (ret)
                        goto unlock;

                ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
                                            &pageattr_ops, NULL, &masks);
                if (ret)
                        goto unlock;
        }

        ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                    &masks);

unlock:
        mmap_write_unlock(&init_mm);

        /*
         * We can't use flush_tlb_kernel_range() here as we may have split a
         * hugepage that is larger than that, so let's flush everything.
         */
        flush_tlb_all();
#else
        ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                    &masks);

        mmap_write_unlock(&init_mm);

        flush_tlb_kernel_range(start, end);
#endif

        return ret;
}

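/*
 * Implementations of the generic set_memory_*() API declared in
 * <asm/set_memory.h>.  An illustrative (hypothetical) use:
 *
 *      set_memory_ro((unsigned long)page_address(page), 1);
 */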
int set_memory_rw_nx(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
                            __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
                            __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
                            __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

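/*
 * Invalidate, or restore to the default kernel protections, the
 * linear-map ("direct map") entry of @page.
 */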
int set_direct_map_invalid_noflush(struct page *page)
{
        return __set_memory((unsigned long)page_address(page), 1,
                            __pgprot(0), __pgprot(_PAGE_PRESENT));
}

int set_direct_map_default_noflush(struct page *page)
{
        return __set_memory((unsigned long)page_address(page), 1,
                            PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}

#ifdef CONFIG_DEBUG_PAGEALLOC
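/*
 * DEBUG_PAGEALLOC: unmap pages from the linear mapping when they are
 * freed and map them again on allocation, so that stray accesses to free
 * pages fault immediately.
 */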
static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
{
        int enable = *(int *)data;

        unsigned long val = pte_val(ptep_get(pte));

        if (enable)
                val |= _PAGE_PRESENT;
        else
                val &= ~_PAGE_PRESENT;

        set_pte(pte, __pte(val));

        return 0;
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (!debug_pagealloc_enabled())
                return;

        unsigned long start = (unsigned long)page_address(page);
        unsigned long size = PAGE_SIZE * numpages;

        apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);

        flush_tlb_kernel_range(start, start + size);
}
#endif

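/*
 * Report whether @page is mapped in the kernel page tables, walking them
 * by hand; a leaf entry at any level counts as present.
 */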
bool kernel_page_present(struct page *page)
{
        unsigned long addr = (unsigned long)page_address(page);
        pgd_t *pgd;
        pud_t *pud;
        p4d_t *p4d;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(addr);
        if (!pgd_present(pgdp_get(pgd)))
                return false;
        if (pgd_leaf(pgdp_get(pgd)))
                return true;

        p4d = p4d_offset(pgd, addr);
        if (!p4d_present(p4dp_get(p4d)))
                return false;
        if (p4d_leaf(p4dp_get(p4d)))
                return true;

        pud = pud_offset(p4d, addr);
        if (!pud_present(pudp_get(pud)))
                return false;
        if (pud_leaf(pudp_get(pud)))
                return true;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(pmdp_get(pmd)))
                return false;
        if (pmd_leaf(pmdp_get(pmd)))
                return true;

        pte = pte_offset_kernel(pmd, addr);
        return pte_present(ptep_get(pte));
}