Linux/arch/s390/boot/vmem.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/task.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/ctlreg.h>
#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
#include "boot.h"

struct ctlreg __bootdata_preserved(s390_invalid_asce);

#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
#endif

#define init_mm                 (*(struct mm_struct *)vmlinux.init_mm_off)
#define swapper_pg_dir          vmlinux.swapper_pg_dir_off
#define invalid_pg_dir          vmlinux.invalid_pg_dir_off

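/*
 * A populate_mode tells pgtable_populate() how a virtual range is to be
 * backed: POPULATE_NONE creates the page table hierarchy without a usable
 * mapping, POPULATE_DIRECT maps addresses 1:1, POPULATE_LOWCORE,
 * POPULATE_ABS_LOWCORE, POPULATE_IDENTITY and POPULATE_KERNEL translate
 * virtual to physical addresses with the corresponding helpers (see _pa()),
 * and the KASAN modes back the shadow region with freshly allocated memory,
 * with the shared zero shadow, or with top level tables only.
 */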
enum populate_mode {
        POPULATE_NONE,
        POPULATE_DIRECT,
        POPULATE_LOWCORE,
        POPULATE_ABS_LOWCORE,
        POPULATE_IDENTITY,
        POPULATE_KERNEL,
#ifdef CONFIG_KASAN
        POPULATE_KASAN_MAP_SHADOW,
        POPULATE_KASAN_ZERO_SHADOW,
        POPULATE_KASAN_SHALLOW
#endif
};

static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode);

#ifdef CONFIG_KASAN

#define kasan_early_shadow_page vmlinux.kasan_early_shadow_page_off
#define kasan_early_shadow_pte  ((pte_t *)vmlinux.kasan_early_shadow_pte_off)
#define kasan_early_shadow_pmd  ((pmd_t *)vmlinux.kasan_early_shadow_pmd_off)
#define kasan_early_shadow_pud  ((pud_t *)vmlinux.kasan_early_shadow_pud_off)
#define kasan_early_shadow_p4d  ((p4d_t *)vmlinux.kasan_early_shadow_p4d_off)
#define __sha(x)                ((unsigned long)kasan_mem_to_shadow((void *)x))

static pte_t pte_z;

static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
        start = PAGE_ALIGN_DOWN(__sha(start));
        end = PAGE_ALIGN(__sha(end));
        pgtable_populate(start, end, mode);
}

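/*
 * Set up the KASAN shadow for the whole kernel address space. Usable
 * physical memory (via its identity mapping) and the kernel image get
 * real shadow memory (POPULATE_KASAN_MAP_SHADOW); memory gaps and
 * untracked regions are backed read-only by the shared early shadow
 * page (POPULATE_KASAN_ZERO_SHADOW); with CONFIG_KASAN_VMALLOC the
 * vmalloc/modules area is only populated shallowly.
 */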
static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
        pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
        pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
        p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
        unsigned long memgap_start = 0;
        unsigned long untracked_end;
        unsigned long start, end;
        int i;

        pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
        if (!machine.has_nx)
                pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
        crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
        memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
        __arch_set_page_dat(kasan_early_shadow_p4d, 1UL << CRST_ALLOC_ORDER);
        __arch_set_page_dat(kasan_early_shadow_pud, 1UL << CRST_ALLOC_ORDER);
        __arch_set_page_dat(kasan_early_shadow_pmd, 1UL << CRST_ALLOC_ORDER);
        __arch_set_page_dat(kasan_early_shadow_pte, 1);

        for_each_physmem_usable_range(i, &start, &end) {
                kasan_populate((unsigned long)__identity_va(start),
                               (unsigned long)__identity_va(end),
                               POPULATE_KASAN_MAP_SHADOW);
                if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260) {
                        kasan_populate((unsigned long)__identity_va(memgap_start),
                                       (unsigned long)__identity_va(start),
                                       POPULATE_KASAN_ZERO_SHADOW);
                }
                memgap_start = end;
        }
        kasan_populate(kernel_start, kernel_end, POPULATE_KASAN_MAP_SHADOW);
        kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW);
        kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW);
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                untracked_end = VMALLOC_START;
                /* shallowly populate kasan shadow for vmalloc and modules */
                kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
        } else {
                untracked_end = MODULES_VADDR;
        }
        /* populate kasan shadow for untracked memory */
        kasan_populate((unsigned long)__identity_va(ident_map_size), untracked_end,
                       POPULATE_KASAN_ZERO_SHADOW);
        kasan_populate(kernel_end, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}

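/*
 * The kasan_*_populate_zero_shadow() helpers short-cut population of
 * POPULATE_KASAN_ZERO_SHADOW ranges: whenever a whole pgd/p4d/pud/pmd
 * region is covered they hook in the shared early shadow tables (or the
 * zero shadow pte) and return true, so the caller does not descend any
 * further for that range.
 */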
static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
                                           unsigned long end, enum populate_mode mode)
{
        if (mode == POPULATE_KASAN_ZERO_SHADOW &&
            IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
                pgd_populate(&init_mm, pgd, kasan_early_shadow_p4d);
                return true;
        }
        return false;
}

static bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
                                           unsigned long end, enum populate_mode mode)
{
        if (mode == POPULATE_KASAN_ZERO_SHADOW &&
            IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
                p4d_populate(&init_mm, p4d, kasan_early_shadow_pud);
                return true;
        }
        return false;
}

static bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
                                           unsigned long end, enum populate_mode mode)
{
        if (mode == POPULATE_KASAN_ZERO_SHADOW &&
            IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
                pud_populate(&init_mm, pud, kasan_early_shadow_pmd);
                return true;
        }
        return false;
}

static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
                                           unsigned long end, enum populate_mode mode)
{
        if (mode == POPULATE_KASAN_ZERO_SHADOW &&
            IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
                pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
                return true;
        }
        return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
        if (mode == POPULATE_KASAN_ZERO_SHADOW) {
                set_pte(pte, pte_z);
                return true;
        }
        return false;
}
#else

static inline void kasan_populate_shadow(unsigned long kernel_start, unsigned long kernel_end)
{
}

static inline bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
                                                  unsigned long end, enum populate_mode mode)
{
        return false;
}

static inline bool kasan_p4d_populate_zero_shadow(p4d_t *p4d, unsigned long addr,
                                                  unsigned long end, enum populate_mode mode)
{
        return false;
}

static inline bool kasan_pud_populate_zero_shadow(pud_t *pud, unsigned long addr,
                                                  unsigned long end, enum populate_mode mode)
{
        return false;
}

static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
                                                  unsigned long end, enum populate_mode mode)
{
        return false;
}

static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
{
        return false;
}

#endif

/*
 * Mimic virt_to_kpte() in the absence of the init_mm symbol.
 * Skip the pmd NULL check though.
 */
static inline pte_t *__virt_to_kpte(unsigned long va)
{
        return pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va), va);
}

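/*
 * Allocate and initialize a crst (region or segment) table of
 * PAGE_SIZE << CRST_ALLOC_ORDER bytes: all entries are set to val and
 * the backing pages are marked as dat tables with __arch_set_page_dat().
 */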
static void *boot_crst_alloc(unsigned long val)
{
        unsigned long size = PAGE_SIZE << CRST_ALLOC_ORDER;
        unsigned long *table;

        table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
        crst_table_init(table, val);
        __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
        return table;
}

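/*
 * Allocate a pte table. A pte table occupies only half a page
 * (_PAGE_TABLE_SIZE), so a freshly allocated page is handed out in two
 * halves: the upper half is returned immediately and the lower half is
 * kept in pte_leftover for the next call.
 */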
static pte_t *boot_pte_alloc(void)
{
        static void *pte_leftover;
        pte_t *pte;

        /*
         * handling pte_leftovers this way helps to avoid memory fragmentation
         * during POPULATE_KASAN_MAP_SHADOW when EDAT is off
         */
        if (!pte_leftover) {
                pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
                pte = pte_leftover + _PAGE_TABLE_SIZE;
                __arch_set_page_dat(pte, 1);
        } else {
                pte = pte_leftover;
                pte_leftover = NULL;
        }

        memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
        return pte;
}

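/*
 * Translate a virtual address to the physical address that is to be
 * mapped, according to the populate mode. For the KASAN map-shadow mode
 * fresh, zeroed shadow memory is allocated instead; POPULATE_NONE and
 * unknown modes yield an invalid address (-1).
 */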
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
{
        switch (mode) {
        case POPULATE_NONE:
                return -1;
        case POPULATE_DIRECT:
                return addr;
        case POPULATE_LOWCORE:
                return __lowcore_pa(addr);
        case POPULATE_ABS_LOWCORE:
                return __abs_lowcore_pa(addr);
        case POPULATE_KERNEL:
                return __kernel_pa(addr);
        case POPULATE_IDENTITY:
                return __identity_pa(addr);
#ifdef CONFIG_KASAN
        case POPULATE_KASAN_MAP_SHADOW:
                addr = physmem_alloc_top_down(RR_VMEM, size, size);
                memset((void *)addr, 0, size);
                return addr;
#endif
        default:
                return -1;
        }
}

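/*
 * Large mappings are only used for the direct, identity and kernel
 * mappings. can_large_pud()/can_large_pmd() additionally require the
 * matching EDAT facility (EDAT-2 for region third, i.e. pud, mappings,
 * EDAT-1 for segment, i.e. pmd, mappings) and that both the virtual and
 * the resulting physical address are suitably aligned with enough room
 * left in the range.
 */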
static bool large_allowed(enum populate_mode mode)
{
        return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
}

static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
                          enum populate_mode mode)
{
        unsigned long size = end - addr;

        return machine.has_edat2 && large_allowed(mode) &&
               IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
               IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
}

static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
                          enum populate_mode mode)
{
        unsigned long size = end - addr;

        return machine.has_edat1 && large_allowed(mode) &&
               IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
               IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
}

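/*
 * The pgtable_*_populate() functions walk (and create, where needed) the
 * page table hierarchy for [addr, end). Huge pud/pmd entries are installed
 * when permitted, otherwise the next lower level table is allocated and
 * filled. For POPULATE_DIRECT the number of newly mapped 4K/1M/2G pages is
 * accounted with update_page_count().
 */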
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
                                 enum populate_mode mode)
{
        unsigned long pages = 0;
        pte_t *pte, entry;

        pte = pte_offset_kernel(pmd, addr);
        for (; addr < end; addr += PAGE_SIZE, pte++) {
                if (pte_none(*pte)) {
                        if (kasan_pte_populate_zero_shadow(pte, mode))
                                continue;
                        entry = __pte(_pa(addr, PAGE_SIZE, mode));
                        entry = set_pte_bit(entry, PAGE_KERNEL);
                        if (!machine.has_nx)
                                entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
                        set_pte(pte, entry);
                        pages++;
                }
        }
        if (mode == POPULATE_DIRECT)
                update_page_count(PG_DIRECT_MAP_4K, pages);
}

static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
                                 enum populate_mode mode)
{
        unsigned long next, pages = 0;
        pmd_t *pmd, entry;
        pte_t *pte;

        pmd = pmd_offset(pud, addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd)) {
                        if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
                                continue;
                        if (can_large_pmd(pmd, addr, next, mode)) {
                                entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
                                entry = set_pmd_bit(entry, SEGMENT_KERNEL);
                                if (!machine.has_nx)
                                        entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
                                set_pmd(pmd, entry);
                                pages++;
                                continue;
                        }
                        pte = boot_pte_alloc();
                        pmd_populate(&init_mm, pmd, pte);
                } else if (pmd_leaf(*pmd)) {
                        continue;
                }
                pgtable_pte_populate(pmd, addr, next, mode);
        }
        if (mode == POPULATE_DIRECT)
                update_page_count(PG_DIRECT_MAP_1M, pages);
}

static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
                                 enum populate_mode mode)
{
        unsigned long next, pages = 0;
        pud_t *pud, entry;
        pmd_t *pmd;

        pud = pud_offset(p4d, addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);
                if (pud_none(*pud)) {
                        if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
                                continue;
                        if (can_large_pud(pud, addr, next, mode)) {
                                entry = __pud(_pa(addr, _REGION3_SIZE, mode));
                                entry = set_pud_bit(entry, REGION3_KERNEL);
                                if (!machine.has_nx)
                                        entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
                                set_pud(pud, entry);
                                pages++;
                                continue;
                        }
                        pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        pud_populate(&init_mm, pud, pmd);
                } else if (pud_leaf(*pud)) {
                        continue;
                }
                pgtable_pmd_populate(pud, addr, next, mode);
        }
        if (mode == POPULATE_DIRECT)
                update_page_count(PG_DIRECT_MAP_2G, pages);
}

static void pgtable_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end,
                                 enum populate_mode mode)
{
        unsigned long next;
        p4d_t *p4d;
        pud_t *pud;

        p4d = p4d_offset(pgd, addr);
        for (; addr < end; addr = next, p4d++) {
                next = p4d_addr_end(addr, end);
                if (p4d_none(*p4d)) {
                        if (kasan_p4d_populate_zero_shadow(p4d, addr, next, mode))
                                continue;
                        pud = boot_crst_alloc(_REGION3_ENTRY_EMPTY);
                        p4d_populate(&init_mm, p4d, pud);
                }
                pgtable_pud_populate(p4d, addr, next, mode);
        }
}

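/*
 * Create page tables for the virtual range [addr, end) according to mode.
 * For POPULATE_KASAN_SHALLOW only the top level (pgd) entries are
 * populated and the walk does not descend any further; the lower levels
 * are set up later, when shadow memory for vmalloc mappings is actually
 * needed.
 */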
static void pgtable_populate(unsigned long addr, unsigned long end, enum populate_mode mode)
{
        unsigned long next;
        pgd_t *pgd;
        p4d_t *p4d;

        pgd = pgd_offset(&init_mm, addr);
        for (; addr < end; addr = next, pgd++) {
                next = pgd_addr_end(addr, end);
                if (pgd_none(*pgd)) {
                        if (kasan_pgd_populate_zero_shadow(pgd, addr, next, mode))
                                continue;
                        p4d = boot_crst_alloc(_REGION2_ENTRY_EMPTY);
                        pgd_populate(&init_mm, pgd, p4d);
                }
#ifdef CONFIG_KASAN
                if (mode == POPULATE_KASAN_SHALLOW)
                        continue;
#endif
                pgtable_p4d_populate(pgd, addr, next, mode);
        }
}

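/*
 * Create the kernel page tables: the (real or alternative) lowcore, the
 * identity mapping of all usable memory, the kernel image, the amode31
 * area, the absolute lowcore, the memcpy_real area and, if enabled, the
 * KASAN shadow. Finally load the new ASCEs into control registers 1, 7
 * and 13.
 */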
void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit)
{
        unsigned long lowcore_address = 0;
        unsigned long start, end;
        unsigned long asce_type;
        unsigned long asce_bits;
        pgd_t *init_mm_pgd;
        int i;

        /*
         * Mark whole memory as no-dat. This must be done before any
         * page tables are allocated, or kernel image builtin pages
         * are marked as dat tables.
         */
        for_each_physmem_online_range(i, &start, &end)
                __arch_set_page_nodat((void *)start, (end - start) >> PAGE_SHIFT);

        /*
         * init_mm->pgd contains the virtual address of swapper_pg_dir.
         * It is unusable at this stage since DAT is not yet enabled. Swap
         * it for the physical address of swapper_pg_dir and restore the
         * virtual address after all page tables are created.
         */
        init_mm_pgd = init_mm.pgd;
        init_mm.pgd = (pgd_t *)swapper_pg_dir;

        if (asce_limit == _REGION1_SIZE) {
                asce_type = _REGION2_ENTRY_EMPTY;
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
        } else {
                asce_type = _REGION3_ENTRY_EMPTY;
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
        }
        s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;

        crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
        crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
        __arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
        __arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);

        if (relocate_lowcore)
                lowcore_address = LOWCORE_ALT_ADDRESS;

        /*
         * To allow prefixing, the lowcore must be mapped with 4KB pages.
         * To prevent creation of a large page at address 0, first map
         * the lowcore and create the identity mapping only afterwards.
         */
        pgtable_populate(lowcore_address,
                         lowcore_address + sizeof(struct lowcore),
                         POPULATE_LOWCORE);
        for_each_physmem_usable_range(i, &start, &end) {
                pgtable_populate((unsigned long)__identity_va(start),
                                 (unsigned long)__identity_va(end),
                                 POPULATE_IDENTITY);
        }
        pgtable_populate(kernel_start, kernel_end, POPULATE_KERNEL);
        pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT);
        pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
                         POPULATE_ABS_LOWCORE);
        pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
                         POPULATE_NONE);
        memcpy_real_ptep = __identity_va(__virt_to_kpte(__memcpy_real_area));

        kasan_populate_shadow(kernel_start, kernel_end);

        get_lowcore()->kernel_asce.val = swapper_pg_dir | asce_bits;
        get_lowcore()->user_asce = s390_invalid_asce;

        local_ctl_load(1, &get_lowcore()->kernel_asce);
        local_ctl_load(7, &get_lowcore()->user_asce);
        local_ctl_load(13, &get_lowcore()->kernel_asce);

        init_mm.context.asce = get_lowcore()->kernel_asce.val;
        init_mm.pgd = init_mm_pgd;
}
