TOMOYO Linux Cross Reference
Linux/arch/x86/mm/pti.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by, and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *                     Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK   0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define PTI_LEVEL_KERNEL_IMAGE  PTI_CLONE_PMD
#else
#define PTI_LEVEL_KERNEL_IMAGE  PTI_CLONE_PTE
#endif
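
/*
 * A plausible rationale, inferred by the editor (the file itself does
 * not say): on x86-64 the kernel image is mapped with large (2MiB)
 * pages, so whole PMDs can be shared with the user table.  On 32-bit
 * the ranges of interest are not PMD-aligned, and a PMD-level clone
 * would expose adjacent kernel mappings, so individual PTEs are
 * cloned instead.
 */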

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

/* Assume mode is auto unless overridden via cmdline below. */
static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	if (cpu_mitigations_off())
		pti_mode = PTI_FORCE_OFF;
	if (pti_mode == PTI_FORCE_OFF) {
		pti_print_if_insecure("disabled on command line.");
		return;
	}

	if (pti_mode == PTI_FORCE_ON)
		pti_print_if_secure("force enabled on command line.");

	if (pti_mode == PTI_AUTO && !boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;

	setup_force_cpu_cap(X86_FEATURE_PTI);
}

static int __init pti_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		pti_mode = PTI_FORCE_OFF;
	else if (!strcmp(arg, "on"))
		pti_mode = PTI_FORCE_ON;
	else if (!strcmp(arg, "auto"))
		pti_mode = PTI_AUTO;
	else
		return -EINVAL;
	return 0;
}
early_param("pti", pti_parse_cmdline);

static int __init pti_parse_cmdline_nopti(char *arg)
{
	pti_mode = PTI_FORCE_OFF;
	return 0;
}
early_param("nopti", pti_parse_cmdline_nopti);
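
/*
 * For reference, the resulting kernel command-line controls (see
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	pti=on		force PTI on, even on unaffected CPUs
 *	pti=off		force PTI off (equivalent to "nopti")
 *	pti=auto	the default: enable only if the CPU has
 *			X86_BUG_CPU_MELTDOWN
 *	mitigations=off	also disables PTI, via cpu_mitigations_off()
 */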

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
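
/*
 * Background sketch (editor's addition, not authoritative): with PTI
 * enabled the PGD is allocated as an order-1 pair of pages, the kernel
 * half in the lower page and the user half in the upper one, so
 * kernel_to_user_pgdp() is essentially pointer arithmetic, roughly:
 *
 *	user_pgdp = (pgd_t *)((unsigned long)pgdp | (1UL << PAGE_SHIFT));
 *
 * which is why the assignment above lands in the same slot of the
 * user copy.
 */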

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_leaf(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_leaf(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_leaf(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}
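
/*
 * Note (added for clarity): the __get_free_page(gfp) calls in these
 * walkers pass __GFP_ZERO, so a freshly installed page-table page
 * starts out with every entry none, which is what the *_none() checks
 * on the way down rely on.
 */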

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* Large PMD mapping found */
	if (pmd_leaf(*pmd)) {
		/* Clear the PMD if we hit a large mapping from the first round */
		if (late_text) {
			set_pmd(pmd, __pmd(0));
		} else {
			WARN_ON_ONCE(1);
			return NULL;
		}
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}
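
/*
 * Typical usage, sketched by the editor (pti_setup_vsyscall() below is
 * the in-tree instance): look up the kernel PTE of a 4k-mapped address,
 * allocate the matching user-table PTE, and copy the entry so both
 * page-table trees share the same page:
 *
 *	pte = lookup_address(addr, &level);
 *	target_pte = pti_user_pagetable_walk_pte(addr, false);
 *	if (pte && target_pte)
 *		*target_pte = *pte;
 */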

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level, bool late_text)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with other
			 * code that only sets this bit when it is supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr = round_up(addr + 1, PMD_SIZE);

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr = round_up(addr + 1, PAGE_SIZE);
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr = round_up(addr + 1, PAGE_SIZE);

		} else {
			BUG();
		}
	}
}
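
/*
 * Worked example (editor's addition): cloning 4MiB starting at a
 * 2MiB-aligned address with PTI_CLONE_PMD copies at most two PMD
 * entries.  If the second PMD happens to be empty, the loop takes the
 * pmd_none() branch and simply skips ahead with
 * round_up(addr + 1, PMD_SIZE) instead of failing; the WARN_ON_ONCE()
 * only fires if such a hole starts at an unaligned address.
 */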

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs one word of scratch space
		 * in which to spill a register.  It lives in the sp2 slot
		 * of the CPU's TSS.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va, false);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text.
 */
static void pti_clone_entry_text(bool late)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_LEVEL_KERNEL_IMAGE, late);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text, so it is not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But do not
	 * clone the areas past rodata; they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();

	/*
	 * Replace some of the global bits just for the shared entry text.
	 * This is very early in boot. Device and late initcalls can run
	 * modprobe before free_initmem() and mark_readonly(). This early
	 * pti_clone_entry_text() allows those user-mode helpers to
	 * function, but notably the text is still RW.
	 */
	pti_clone_entry_text(false);
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * This is after free_initmem() (all initcalls are done) and we've done
	 * mark_readonly(). Text is now NX which might've split some PMDs
	 * relative to the early clone.
	 */
	pti_clone_entry_text(true);
	pti_clone_kernel_text();

	debug_checkwx_user();
}

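/*
 * Call-site summary (added for orientation): pti_init() is called from
 * mm_init() in init/main.c, after espfix64 is set up, and
 * pti_finalize() from kernel_init() once free_initmem() and
 * mark_readonly() have run, which is why the late pass has to re-clone
 * the entry text.
 */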
