
TOMOYO Linux Cross Reference
Linux/arch/x86/mm/kaslr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap) for x86_64. This security feature mitigates
 * exploits relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on PGD & P4D/PUD page table levels to increase possible addresses.
 * The physical memory mapping code was adapted to support P4D/PUD level
 * virtual addresses. On the best configuration, this implementation
 * provides about 30,000 possible virtual addresses on average for each
 * memory region. An additional low memory page is used to ensure each CPU
 * can start with a PGD aligned virtual address (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and space between each. The size of the
 * physical memory mapping is the available physical memory.
 */
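/*
 * For scale (illustrative, assuming 4 KiB pages so that PUD_SIZE is
 * 1 GiB): if roughly 30 TiB of slack remains between vaddr_start and
 * vaddr_end once the three regions are sized, each region draws its base
 * offset from a ~10 TiB window, i.e. on the order of 10,000 distinct
 * 1 GiB-aligned bases per region.
 */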

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to make the
 * largest amount of space available for randomization, but that's too hard
 * to keep straight and has already caused issues.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
        unsigned long *base;
        unsigned long *end;
        unsigned long size_tb;
} kaslr_regions[] = {
        {
                .base   = &page_offset_base,
                .end    = &physmem_end,
        },
        {
                .base   = &vmalloc_base,
        },
        {
                .base   = &vmemmap_base,
        },
};

/* The end of the possible address space for physical memory */
unsigned long physmem_end __ro_after_init;

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
        return (region->size_tb << TB_SHIFT);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
        size_t i;
        unsigned long vaddr_start, vaddr;
        unsigned long rand, memory_tb;
        struct rnd_state rand_state;
        unsigned long remain_entropy;
        unsigned long vmemmap_size;

        vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
        vaddr = vaddr_start;

        /*
         * These BUILD_BUG_ON checks ensure the memory layout is consistent
         * with the vaddr_start/vaddr_end variables. These checks are very
         * limited....
         */
        BUILD_BUG_ON(vaddr_start >= vaddr_end);
        BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
        BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

        /* Preset the end of the possible address space for physical memory */
        physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1);
        if (!kaslr_memory_enabled())
                return;

        kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
        kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
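        /*
         * For scale (illustrative, configuration dependent): with 4-level
         * paging MAX_PHYSMEM_BITS is 46, so kaslr_regions[0] starts out as
         * 1 << (46 - 40) = 64 TiB; with 5-level paging (52 bits) it would
         * be 4096 TiB. VMALLOC_SIZE_TB is likewise 32 TiB or 12800 TiB.
         */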

        /*
         * Update the size of the physical memory mapping to match the
         * actual available memory, and add padding if needed (especially
         * for memory hotplug support).
         */
        BUG_ON(kaslr_regions[0].base != &page_offset_base);
        memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
                CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;

        /* Adapt physical memory region size based on available memory */
        if (memory_tb < kaslr_regions[0].size_tb)
                kaslr_regions[0].size_tb = memory_tb;
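        /*
         * Worked example (assumed numbers): with 12 GiB of RAM,
         * max_pfn << PAGE_SHIFT is 12 GiB, which DIV_ROUND_UP() takes to
         * 1 TiB. If CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING is 0xa (its
         * default when memory hotplug is enabled), memory_tb is 11 and the
         * 64 TiB default region above shrinks to 11 TiB.
         */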

        /*
         * Calculate the vmemmap region size in TBs, aligned to a TB
         * boundary.
         */
        vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
                        sizeof(struct page);
        kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
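        /*
         * Worked example (assuming 4 KiB pages and a 64-byte struct page,
         * both typical but configuration dependent): each TiB of direct
         * map covers 1 << (40 - 12) = 256M pages, whose struct pages need
         * 256M * 64 B = 16 GiB of vmemmap, so an 11 TiB direct map rounds
         * up to a 1 TiB vmemmap region.
         */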

        /* Calculate entropy available between regions */
        remain_entropy = vaddr_end - vaddr_start;
        for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
                remain_entropy -= get_padding(&kaslr_regions[i]);

        prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

        for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
                unsigned long entropy;

                /*
                 * Select a random virtual address using the extra entropy
                 * available.
                 */
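                /*
                 * Illustration of the steps below (assumed numbers): with
                 * 12 TiB of slack left and all three regions still to be
                 * placed, this region's budget is 4 TiB. rand % (budget + 1)
                 * picks a byte offset in [0, 4 TiB], and PUD_MASK clears the
                 * low bits so the offset is PUD aligned (1 GiB with 4 KiB
                 * pages), i.e. about 4096 distinct bases for this region.
                 */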
                entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
                prandom_bytes_state(&rand_state, &rand, sizeof(rand));
                entropy = (rand % (entropy + 1)) & PUD_MASK;
                vaddr += entropy;
                *kaslr_regions[i].base = vaddr;

                /* Calculate the end of the region */
                vaddr += get_padding(&kaslr_regions[i]);
                /*
                 * KASLR trims the maximum possible size of the
                 * direct-map. Update the physmem_end boundary.
                 * No rounding required as the region starts
                 * PUD aligned and size is in units of TB.
                 */
                if (kaslr_regions[i].end)
                        *kaslr_regions[i].end = __pa_nodebug(vaddr - 1);

                /* Add a minimum padding based on randomization alignment. */
                vaddr = round_up(vaddr + 1, PUD_SIZE);
                remain_entropy -= entropy;
        }
}

void __meminit init_trampoline_kaslr(void)
{
        pud_t *pud_page_tramp, *pud, *pud_tramp;
        p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
        unsigned long paddr, vaddr;
        pgd_t *pgd;

        pud_page_tramp = alloc_low_page();

        /*
         * There are two mappings for the low 1MB area, the direct mapping
         * and the 1:1 mapping for the real mode trampoline:
         *
         * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
         * 1:1 mapping:    virt_addr = phys_addr
         */
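        /*
         * For example (illustrative addresses): phys 0x1000 is reachable
         * both at PAGE_OFFSET + 0x1000 through the randomized direct
         * mapping and at virtual 0x1000 through the trampoline's 1:1
         * mapping used while CPUs start up in real mode.
         */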
        paddr = 0;
        vaddr = (unsigned long)__va(paddr);
        pgd = pgd_offset_k(vaddr);

        p4d = p4d_offset(pgd, vaddr);
        pud = pud_offset(p4d, vaddr);

        pud_tramp = pud_page_tramp + pud_index(paddr);
        *pud_tramp = *pud;

        if (pgtable_l5_enabled()) {
                p4d_page_tramp = alloc_low_page();

                p4d_tramp = p4d_page_tramp + p4d_index(paddr);

                set_p4d(p4d_tramp,
                        __p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));

                trampoline_pgd_entry =
                        __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
        } else {
                trampoline_pgd_entry =
                        __pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
        }
}

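As an aside for readers tracing the placement loop in kernel_randomize_memory(): the stand-alone userspace sketch below models the same "even share of remaining slack, PUD aligned" policy. It is illustrative only; the window size, the region sizes, and the use of rand() in place of the kernel's seeded prandom state are all assumptions, not kernel values.

/*
 * Userspace model of the kaslr_regions placement loop. All constants are
 * made-up stand-ins for demonstration.
 */
#include <stdio.h>
#include <stdlib.h>

#define TB              (1UL << 40)
#define PUD_SIZE        (1UL << 30)     /* 1 GiB, as with 4 KiB pages */
#define PUD_MASK        (~(PUD_SIZE - 1))
#define NR_REGIONS      3

int main(void)
{
        /* Assumed region sizes: physmap, vmalloc, vmemmap */
        unsigned long size[NR_REGIONS] = { 11 * TB, 32 * TB, 1 * TB };
        unsigned long base, vaddr = 0, remain = 60 * TB;
        int i;

        for (i = 0; i < NR_REGIONS; i++)
                remain -= size[i];

        srand(42);      /* stand-in for prandom_seed_state() */

        for (i = 0; i < NR_REGIONS; i++) {
                /* Even share of the remaining slack for this region */
                unsigned long budget = remain / (NR_REGIONS - i);
                /* Two rand() calls give enough bits for a TiB-scale draw */
                unsigned long r = ((unsigned long)rand() << 31) | (unsigned long)rand();
                unsigned long entropy = (r % (budget + 1)) & PUD_MASK;

                vaddr += entropy;
                base = vaddr;
                vaddr += size[i];
                /* Minimum padding: next PUD boundary past the region end */
                vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
                remain -= entropy;

                printf("region %d: base offset %4lu GiB, size %2lu TiB\n",
                       i, base >> 30, size[i] >> 40);
        }
        return 0;
}

Built with any C compiler on a 64-bit host, this prints one 1 GiB-aligned base offset per region. The kernel version differs in using real region sizes and a boot-time entropy source, and in writing the results into page_offset_base, vmalloc_base and vmemmap_base.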