TOMOYO Linux Cross Reference
Linux/arch/nios2/mm/init.c

Diff markup

Differences between /arch/nios2/mm/init.c (Architecture nios2) and /arch/mips/mm/init.c (Architecture mips)


  1 /*                                                  1 /*
  2  * Copyright (C) 2013 Altera Corporation       << 
  3  * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch> << 
  4  * Copyright (C) 2009 Wind River Systems Inc   << 
  5  *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com << 
  6  * Copyright (C) 2004 Microtronix Datacom Ltd  << 
  7  *                                             << 
  8  * based on arch/m68k/mm/init.c                << 
  9  *                                             << 
 10  * This file is subject to the terms and conditions of the GNU General Public      2  * This file is subject to the terms and conditions of the GNU General Public
 11  * License. See the file "COPYING" in the main directory of this archive !!   3  * License.  See the file "COPYING" in the main directory of this archive
 12  * for more details.                                4  * for more details.
                                                   >>   5  *
                                                   >>   6  * Copyright (C) 1994 - 2000 Ralf Baechle
                                                   >>   7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
                                                   >>   8  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
                                                   >>   9  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 13  */                                                10  */
 14                                                !!  11 #include <linux/bug.h>
                                                   >>  12 #include <linux/init.h>
                                                   >>  13 #include <linux/export.h>
 15 #include <linux/signal.h>                          14 #include <linux/signal.h>
 16 #include <linux/sched.h>                           15 #include <linux/sched.h>
                                                   >>  16 #include <linux/smp.h>
 17 #include <linux/kernel.h>                          17 #include <linux/kernel.h>
 18 #include <linux/errno.h>                           18 #include <linux/errno.h>
 19 #include <linux/string.h>                          19 #include <linux/string.h>
 20 #include <linux/types.h>                           20 #include <linux/types.h>
                                                   >>  21 #include <linux/pagemap.h>
 21 #include <linux/ptrace.h>                          22 #include <linux/ptrace.h>
 22 #include <linux/mman.h>                            23 #include <linux/mman.h>
 23 #include <linux/mm.h>                              24 #include <linux/mm.h>
 24 #include <linux/init.h>                        << 
 25 #include <linux/pagemap.h>                     << 
 26 #include <linux/memblock.h>                        25 #include <linux/memblock.h>
 27 #include <linux/slab.h>                        !!  26 #include <linux/highmem.h>
 28 #include <linux/binfmts.h>                     !!  27 #include <linux/swap.h>
                                                   >>  28 #include <linux/proc_fs.h>
                                                   >>  29 #include <linux/pfn.h>
                                                   >>  30 #include <linux/hardirq.h>
                                                   >>  31 #include <linux/gfp.h>
                                                   >>  32 #include <linux/kcore.h>
                                                   >>  33 #include <linux/initrd.h>
 29 #include <linux/execmem.h>                         34 #include <linux/execmem.h>
 30                                                    35 
 31 #include <asm/setup.h>                         !!  36 #include <asm/bootinfo.h>
 32 #include <asm/page.h>                          !!  37 #include <asm/cachectl.h>
                                                   >>  38 #include <asm/cpu.h>
                                                   >>  39 #include <asm/dma.h>
                                                   >>  40 #include <asm/maar.h>
                                                   >>  41 #include <asm/mmu_context.h>
                                                   >>  42 #include <asm/mmzone.h>
 33 #include <asm/sections.h>                          43 #include <asm/sections.h>
                                                   >>  44 #include <asm/pgalloc.h>
 34 #include <asm/tlb.h>                               45 #include <asm/tlb.h>
 35 #include <asm/mmu_context.h>                   !!  46 #include <asm/fixmap.h>
 36 #include <asm/cpuinfo.h>                       << 
 37 #include <asm/processor.h>                     << 
 38                                                    47 
 39 pgd_t *pgd_current;                            !!  48 /*
                                                   >>  49  * We have up to 8 empty zeroed pages so we can map one of the right colour
                                                   >>  50  * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
                                                   >>  51  * where we have to avoid VCED / VECI exceptions for good performance at
                                                   >>  52  * any price.  Since page is never written to after the initialization we
                                                   >>  53  * don't have to care about aliases on other CPUs.
                                                   >>  54  */
                                                   >>  55 unsigned long empty_zero_page, zero_page_mask;
                                                   >>  56 EXPORT_SYMBOL_GPL(empty_zero_page);
                                                   >>  57 EXPORT_SYMBOL(zero_page_mask);
 40                                                    58 
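For context: zero_page_mask above is what lets callers pick a zeroed page whose dcache colour matches a given user address. A sketch modelled on the MIPS ZERO_PAGE() macro from asm/pgtable.h (reproduced from memory; treat the exact form as approximate):

        /*
         * Select the empty page whose colour matches vaddr. On CPUs without
         * VCED/VECI aliasing, the order chosen in setup_zero_pages() below is
         * 0 and zero_page_mask is 0, so every caller gets the same page.
         */
        #define ZERO_PAGE(vaddr)                                        \
                (virt_to_page((void *)(empty_zero_page +                \
                        ((unsigned long)(vaddr) & zero_page_mask))))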
 41 /*                                                 59 /*
 42  * paging_init() continues the virtual memory environment setup which !!  60  * Not static inline because used by IP27 special magic initialization code
 43  * was begun by the code in arch/head.S.       << 
 44  * The parameters are pointers to where to stick the starting and ending << 
 45  * addresses of available kernel virtual memory. << 
 46  */                                                61  */
 47 void __init paging_init(void)                  !!  62 void setup_zero_pages(void)
 48 {                                                  63 {
 49         unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; !!  64         unsigned int order, i;
                                                   >>  65         struct page *page;
 50                                                    66 
 51         pagetable_init();                      !!  67         if (cpu_has_vce)
 52         pgd_current = swapper_pg_dir;          !!  68                 order = 3;
                                                   >>  69         else
                                                   >>  70                 order = 0;
                                                   >>  71 
                                                   >>  72         empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
                                                   >>  73         if (!empty_zero_page)
                                                   >>  74                 panic("Oh boy, that early out of memory?");
                                                   >>  75 
                                                   >>  76         page = virt_to_page((void *)empty_zero_page);
                                                   >>  77         split_page(page, order);
                                                   >>  78         for (i = 0; i < (1 << order); i++, page++)
                                                   >>  79                 mark_page_reserved(page);
                                                   >>  80 
                                                   >>  81         zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
                                                   >>  82 }
 53                                                    83 
 54         max_zone_pfn[ZONE_NORMAL] = max_mapnr; !!  84 static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
                                                   >>  85 {
                                                   >>  86         enum fixed_addresses idx;
                                                   >>  87         unsigned int old_mmid;
                                                   >>  88         unsigned long vaddr, flags, entrylo;
                                                   >>  89         unsigned long old_ctx;
                                                   >>  90         pte_t pte;
                                                   >>  91         int tlbidx;
                                                   >>  92 
                                                   >>  93         BUG_ON(folio_test_dcache_dirty(page_folio(page)));
                                                   >>  94 
                                                   >>  95         preempt_disable();
                                                   >>  96         pagefault_disable();
                                                   >>  97         idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
                                                   >>  98         idx += in_interrupt() ? FIX_N_COLOURS : 0;
                                                   >>  99         vaddr = __fix_to_virt(FIX_CMAP_END - idx);
                                                   >> 100         pte = mk_pte(page, prot);
                                                   >> 101 #if defined(CONFIG_XPA)
                                                   >> 102         entrylo = pte_to_entrylo(pte.pte_high);
                                                   >> 103 #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
                                                   >> 104         entrylo = pte.pte_high;
                                                   >> 105 #else
                                                   >> 106         entrylo = pte_to_entrylo(pte_val(pte));
                                                   >> 107 #endif
                                                   >> 108 
                                                   >> 109         local_irq_save(flags);
                                                   >> 110         old_ctx = read_c0_entryhi();
                                                   >> 111         write_c0_entryhi(vaddr & (PAGE_MASK << 1));
                                                   >> 112         write_c0_entrylo0(entrylo);
                                                   >> 113         write_c0_entrylo1(entrylo);
                                                   >> 114         if (cpu_has_mmid) {
                                                   >> 115                 old_mmid = read_c0_memorymapid();
                                                   >> 116                 write_c0_memorymapid(MMID_KERNEL_WIRED);
                                                   >> 117         }
                                                   >> 118 #ifdef CONFIG_XPA
                                                   >> 119         if (cpu_has_xpa) {
                                                   >> 120                 entrylo = (pte.pte_low & _PFNX_MASK);
                                                   >> 121                 writex_c0_entrylo0(entrylo);
                                                   >> 122                 writex_c0_entrylo1(entrylo);
                                                   >> 123         }
                                                   >> 124 #endif
                                                   >> 125         tlbidx = num_wired_entries();
                                                   >> 126         write_c0_wired(tlbidx + 1);
                                                   >> 127         write_c0_index(tlbidx);
                                                   >> 128         mtc0_tlbw_hazard();
                                                   >> 129         tlb_write_indexed();
                                                   >> 130         tlbw_use_hazard();
                                                   >> 131         write_c0_entryhi(old_ctx);
                                                   >> 132         if (cpu_has_mmid)
                                                   >> 133                 write_c0_memorymapid(old_mmid);
                                                   >> 134         local_irq_restore(flags);
 55                                                   135 
 56         /* pass the memory from the bootmem allocator to the main allocator */ !! 136         return (void*) vaddr;
 57         free_area_init(max_zone_pfn);          !! 137 }
 58                                                   138 
 59         flush_dcache_range((unsigned long)empty_zero_page, !! 139 void *kmap_coherent(struct page *page, unsigned long addr)
 60                         (unsigned long)empty_zero_page + PAGE_SIZE); !! 140 {
                                                   >> 141         return __kmap_pgprot(page, addr, PAGE_KERNEL);
 61 }                                                 142 }
 62                                                   143 
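The kmap_coherent()/kunmap_coherent() pair above wires a temporary TLB entry at a fixmap slot whose virtual colour matches the user address, so the kernel can touch the page without creating a dcache alias. A hedged usage sketch (hypothetical helper; the same pattern appears in copy_to_user_page() below):

        static void write_to_user_page(struct page *page, unsigned long uaddr,
                                       const void *src, size_t len)
        {
                /* Preemption and page faults stay disabled until the unmap,
                 * so nothing in between may sleep. */
                void *vto = kmap_coherent(page, uaddr);

                memcpy(vto + (uaddr & ~PAGE_MASK), src, len);
                kunmap_coherent();
        }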
 63 void __init mem_init(void)                     !! 144 void *kmap_noncoherent(struct page *page, unsigned long addr)
 64 {                                                 145 {
 65         unsigned long end_mem   = memory_end; /* this must not include !! 146         return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
 66                                                  kernel stack at top */ !! 147 }
 67                                                   148 
 68         pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end); !! 149 void kunmap_coherent(void)
                                                   >> 150 {
                                                   >> 151         unsigned int wired;
                                                   >> 152         unsigned long flags, old_ctx;
 69                                                   153 
 70         end_mem &= PAGE_MASK;                  !! 154         local_irq_save(flags);
 71         high_memory = __va(end_mem);           !! 155         old_ctx = read_c0_entryhi();
                                                   >> 156         wired = num_wired_entries() - 1;
                                                   >> 157         write_c0_wired(wired);
                                                   >> 158         write_c0_index(wired);
                                                   >> 159         write_c0_entryhi(UNIQUE_ENTRYHI(wired));
                                                   >> 160         write_c0_entrylo0(0);
                                                   >> 161         write_c0_entrylo1(0);
                                                   >> 162         mtc0_tlbw_hazard();
                                                   >> 163         tlb_write_indexed();
                                                   >> 164         tlbw_use_hazard();
                                                   >> 165         write_c0_entryhi(old_ctx);
                                                   >> 166         local_irq_restore(flags);
                                                   >> 167         pagefault_enable();
                                                   >> 168         preempt_enable();
                                                   >> 169 }
 72                                                   170 
 73         /* this will put all memory onto the freelists */ !! 171 void copy_user_highpage(struct page *to, struct page *from,
 74         memblock_free_all();                   !! 172         unsigned long vaddr, struct vm_area_struct *vma)
                                                   >> 173 {
                                                   >> 174         struct folio *src = page_folio(from);
                                                   >> 175         void *vfrom, *vto;
                                                   >> 176 
                                                   >> 177         vto = kmap_atomic(to);
                                                   >> 178         if (cpu_has_dc_aliases &&
                                                   >> 179             folio_mapped(src) && !folio_test_dcache_dirty(src)) {
                                                   >> 180                 vfrom = kmap_coherent(from, vaddr);
                                                   >> 181                 copy_page(vto, vfrom);
                                                   >> 182                 kunmap_coherent();
                                                   >> 183         } else {
                                                   >> 184                 vfrom = kmap_atomic(from);
                                                   >> 185                 copy_page(vto, vfrom);
                                                   >> 186                 kunmap_atomic(vfrom);
                                                   >> 187         }
                                                   >> 188         if ((!cpu_has_ic_fills_f_dc) ||
                                                   >> 189             pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                                                   >> 190                 flush_data_cache_page((unsigned long)vto);
                                                   >> 191         kunmap_atomic(vto);
                                                   >> 192         /* Make sure this page is cleared on other CPU's too before using it */
                                                   >> 193         smp_wmb();
                                                   >> 194 }
                                                   >> 195 
                                                   >> 196 void copy_to_user_page(struct vm_area_struct *vma,
                                                   >> 197         struct page *page, unsigned long vaddr, void *dst, const void *src,
                                                   >> 198         unsigned long len)
                                                   >> 199 {
                                                   >> 200         struct folio *folio = page_folio(page);
                                                   >> 201 
                                                   >> 202         if (cpu_has_dc_aliases &&
                                                   >> 203             folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
                                                   >> 204                 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                                                   >> 205                 memcpy(vto, src, len);
                                                   >> 206                 kunmap_coherent();
                                                   >> 207         } else {
                                                   >> 208                 memcpy(dst, src, len);
                                                   >> 209                 if (cpu_has_dc_aliases)
                                                   >> 210                         folio_set_dcache_dirty(folio);
                                                   >> 211         }
                                                   >> 212         if (vma->vm_flags & VM_EXEC)
                                                   >> 213                 flush_cache_page(vma, vaddr, page_to_pfn(page));
 75 }                                                 214 }
 76                                                   215 
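copy_user_highpage() above flushes only when pages_do_alias() reports that the kernel and user virtual addresses index different dcache sets for the same physical page. The predicate is essentially a colour comparison; approximately, per the MIPS cache headers (quoted from memory):

        /*
         * Two virtual addresses alias in a virtually-indexed dcache when they
         * differ in the index bits above PAGE_SHIFT; shm_align_mask covers
         * exactly those bits. (Approximate form.)
         */
        #define pages_do_alias(addr1, addr2) \
                ((((addr1) ^ (addr2)) & shm_align_mask) != 0)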
 77 void __init mmu_init(void)                     !! 216 void copy_from_user_page(struct vm_area_struct *vma,
                                                   >> 217         struct page *page, unsigned long vaddr, void *dst, const void *src,
                                                   >> 218         unsigned long len)
 78 {                                                 219 {
 79         flush_tlb_all();                       !! 220         struct folio *folio = page_folio(page);
                                                   >> 221 
                                                   >> 222         if (cpu_has_dc_aliases &&
                                                   >> 223             folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
                                                   >> 224                 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                                                   >> 225                 memcpy(dst, vfrom, len);
                                                   >> 226                 kunmap_coherent();
                                                   >> 227         } else {
                                                   >> 228                 memcpy(dst, src, len);
                                                   >> 229                 if (cpu_has_dc_aliases)
                                                   >> 230                         folio_set_dcache_dirty(folio);
                                                   >> 231         }
 80 }                                                 232 }
                                                   >> 233 EXPORT_SYMBOL_GPL(copy_from_user_page);
 81                                                   234 
 82 pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE); !! 235 void __init fixrange_init(unsigned long start, unsigned long end,
 83 pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE); !! 236         pgd_t *pgd_base)
 84 static struct page *kuser_page[1];             !! 237 {
 85 static struct vm_special_mapping vdso_mapping = { !! 238 #ifdef CONFIG_HIGHMEM
 86         .name = "[vdso]",                      !! 239         pgd_t *pgd;
 87         .pages = kuser_page,                   !! 240         pud_t *pud;
                                                   >> 241         pmd_t *pmd;
                                                   >> 242         pte_t *pte;
                                                   >> 243         int i, j, k;
                                                   >> 244         unsigned long vaddr;
                                                   >> 245 
                                                   >> 246         vaddr = start;
                                                   >> 247         i = pgd_index(vaddr);
                                                   >> 248         j = pud_index(vaddr);
                                                   >> 249         k = pmd_index(vaddr);
                                                   >> 250         pgd = pgd_base + i;
                                                   >> 251 
                                                   >> 252         for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                                                   >> 253                 pud = (pud_t *)pgd;
                                                   >> 254                 for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                                                   >> 255                         pmd = (pmd_t *)pud;
                                                   >> 256                         for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                                   >> 257                                 if (pmd_none(*pmd)) {
                                                   >> 258                                         pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
                                                   >> 259                                                                            PAGE_SIZE);
                                                   >> 260                                         if (!pte)
                                                   >> 261                                                 panic("%s: Failed to allocate %lu bytes align=%lx\n",
                                                   >> 262                                                       __func__, PAGE_SIZE,
                                                   >> 263                                                       PAGE_SIZE);
                                                   >> 264 
                                                   >> 265                                         set_pmd(pmd, __pmd((unsigned long)pte));
                                                   >> 266                                         BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                                   >> 267                                 }
                                                   >> 268                                 vaddr += PMD_SIZE;
                                                   >> 269                         }
                                                   >> 270                         k = 0;
                                                   >> 271                 }
                                                   >> 272                 j = 0;
                                                   >> 273         }
                                                   >> 274 #endif
                                                   >> 275 }
                                                   >> 276 
                                                   >> 277 struct maar_walk_info {
                                                   >> 278         struct maar_config cfg[16];
                                                   >> 279         unsigned int num_cfg;
 88 };                                                280 };
 89                                                   281 
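fixrange_init() above only allocates the intermediate page tables for the fixmap window; the PTEs themselves are filled in later by the fixmap and kmap users. Roughly how the 32-bit pagetable_init() invokes it (recalled from arch/mips/mm/pgtable-32.c, so the details are approximate):

        unsigned long vaddr;

        /* Cover the whole fixed-address window, rounded down to a PMD
         * boundary, walking from swapper_pg_dir. */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, vaddr + FIXADDR_SIZE, swapper_pg_dir);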
 90 static int alloc_kuser_page(void)              !! 282 static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
                                                   >> 283                          void *data)
 91 {                                                 284 {
 92         extern char __kuser_helper_start[], __kuser_helper_end[]; !! 285         struct maar_walk_info *wi = data;
 93         int kuser_sz = __kuser_helper_end - __kuser_helper_start; !! 286         struct maar_config *cfg = &wi->cfg[wi->num_cfg];
 94         unsigned long vpage;                   !! 287         unsigned int maar_align;
                                                   >> 288 
                                                   >> 289         /* MAAR registers hold physical addresses right shifted by 4 bits */
                                                   >> 290         maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);
                                                   >> 291 
                                                   >> 292         /* Fill in the MAAR config entry */
                                                   >> 293         cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
                                                   >> 294         cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
                                                   >> 295         cfg->attrs = MIPS_MAAR_S;
                                                   >> 296 
                                                   >> 297         /* Ensure we don't overflow the cfg array */
                                                   >> 298         if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
                                                   >> 299                 wi->num_cfg++;
                                                   >> 300 
                                                   >> 301         return 0;
                                                   >> 302 }
 95                                                   303 
 96         vpage = get_zeroed_page(GFP_ATOMIC);   << 
 97         if (!vpage)                            << 
 98                 return -ENOMEM;                << 
 99                                                   304 
100         /* Copy kuser helpers */               !! 305 unsigned __weak platform_maar_init(unsigned num_pairs)
101         memcpy((void *)vpage, __kuser_helper_start, kuser_sz); !! 306 {
                                                   >> 307         unsigned int num_configured;
                                                   >> 308         struct maar_walk_info wi;
102                                                   309 
103         flush_icache_range(vpage, vpage + KUSER_SIZE); !! 310         wi.num_cfg = 0;
104         kuser_page[0] = virt_to_page(vpage);   !! 311         walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);
105                                                   312 
106         return 0;                              !! 313         num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
                                                   >> 314         if (num_configured < wi.num_cfg)
                                                   >> 315                 pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
                                                   >> 316                         num_pairs, wi.num_cfg);
                                                   >> 317 
                                                   >> 318         return num_configured;
107 }                                                 319 }
108 arch_initcall(alloc_kuser_page);               << 
109                                                   320 
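Because platform_maar_init() is declared __weak, a platform can replace the generic RAM walk with its own MAAR layout. A hypothetical override (the address range is illustrative, not taken from any real board):

        /* Hypothetical: allow speculation only over the first 256 MiB of
         * RAM, consuming a single MAAR pair. */
        unsigned platform_maar_init(unsigned num_pairs)
        {
                struct maar_config cfg = {
                        .lower = 0x00000000,
                        .upper = 0x0fffffff,
                        .attrs = MIPS_MAAR_S,
                };

                return maar_config(&cfg, 1, num_pairs);
        }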
110 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) !! 321 void maar_init(void)
111 {                                                 322 {
112         struct mm_struct *mm = current->mm;    !! 323         unsigned num_maars, used, i;
113         struct vm_area_struct *vma;            !! 324         phys_addr_t lower, upper, attr;
                                                   >> 325         static struct {
                                                   >> 326                 struct maar_config cfgs[3];
                                                   >> 327                 unsigned used;
                                                   >> 328         } recorded = { { { 0 } }, 0 };
                                                   >> 329 
                                                   >> 330         if (!cpu_has_maar)
                                                   >> 331                 return;
                                                   >> 332 
                                                   >> 333         /* Detect the number of MAARs */
                                                   >> 334         write_c0_maari(~0);
                                                   >> 335         back_to_back_c0_hazard();
                                                   >> 336         num_maars = read_c0_maari() + 1;
                                                   >> 337 
                                                   >> 338         /* MAARs should be in pairs */
                                                   >> 339         WARN_ON(num_maars % 2);
                                                   >> 340 
                                                   >> 341         /* Set MAARs using values we recorded already */
                                                   >> 342         if (recorded.used) {
                                                   >> 343                 used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
                                                   >> 344                 BUG_ON(used != recorded.used);
                                                   >> 345         } else {
                                                   >> 346                 /* Configure the required MAARs */
                                                   >> 347                 used = platform_maar_init(num_maars / 2);
                                                   >> 348         }
                                                   >> 349 
                                                   >> 350         /* Disable any further MAARs */
                                                   >> 351         for (i = (used * 2); i < num_maars; i++) {
                                                   >> 352                 write_c0_maari(i);
                                                   >> 353                 back_to_back_c0_hazard();
                                                   >> 354                 write_c0_maar(0);
                                                   >> 355                 back_to_back_c0_hazard();
                                                   >> 356         }
                                                   >> 357 
                                                   >> 358         if (recorded.used)
                                                   >> 359                 return;
                                                   >> 360 
                                                   >> 361         pr_info("MAAR configuration:\n");
                                                   >> 362         for (i = 0; i < num_maars; i += 2) {
                                                   >> 363                 write_c0_maari(i);
                                                   >> 364                 back_to_back_c0_hazard();
                                                   >> 365                 upper = read_c0_maar();
                                                   >> 366 #ifdef CONFIG_XPA
                                                   >> 367                 upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
                                                   >> 368 #endif
                                                   >> 369 
                                                   >> 370                 write_c0_maari(i + 1);
                                                   >> 371                 back_to_back_c0_hazard();
                                                   >> 372                 lower = read_c0_maar();
                                                   >> 373 #ifdef CONFIG_XPA
                                                   >> 374                 lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
                                                   >> 375 #endif
                                                   >> 376 
                                                   >> 377                 attr = lower & upper;
                                                   >> 378                 lower = (lower & MIPS_MAAR_ADDR) << 4;
                                                   >> 379                 upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
                                                   >> 380 
                                                   >> 381                 pr_info("  [%d]: ", i / 2);
                                                   >> 382                 if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
                                                   >> 383                         pr_cont("disabled\n");
                                                   >> 384                         continue;
                                                   >> 385                 }
                                                   >> 386 
                                                   >> 387                 pr_cont("%pa-%pa", &lower, &upper);
                                                   >> 388 
                                                   >> 389                 if (attr & MIPS_MAAR_S)
                                                   >> 390                         pr_cont(" speculate");
                                                   >> 391 
                                                   >> 392                 pr_cont("\n");
                                                   >> 393 
                                                   >> 394                 /* Record the setup for use on secondary CPUs */
                                                   >> 395                 if (used <= ARRAY_SIZE(recorded.cfgs)) {
                                                   >> 396                         recorded.cfgs[recorded.used].lower = lower;
                                                   >> 397                         recorded.cfgs[recorded.used].upper = upper;
                                                   >> 398                         recorded.cfgs[recorded.used].attrs = attr;
                                                   >> 399                         recorded.used++;
                                                   >> 400                 }
                                                   >> 401         }
                                                   >> 402 }
114                                                   403 
115         mmap_write_lock(mm);                   !! 404 #ifndef CONFIG_NUMA
                                                   >> 405 void __init paging_init(void)
                                                   >> 406 {
                                                   >> 407         unsigned long max_zone_pfns[MAX_NR_ZONES];
116                                                   408 
117         /* Map kuser helpers to user space address */ !! 409         pagetable_init();
118         vma = _install_special_mapping(mm, KUSER_BASE, KUSER_SIZE, << 
119                                       VM_READ | VM_EXEC | VM_MAYREAD | << 
120                                       VM_MAYEXEC, &vdso_mapping); << 
121                                                   410 
122         mmap_write_unlock(mm);                 !! 411 #ifdef CONFIG_ZONE_DMA
                                                   >> 412         max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
                                                   >> 413 #endif
                                                   >> 414 #ifdef CONFIG_ZONE_DMA32
                                                   >> 415         max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
                                                   >> 416 #endif
                                                   >> 417         max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
                                                   >> 418 #ifdef CONFIG_HIGHMEM
                                                   >> 419         max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
                                                   >> 420 
                                                   >> 421         if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                                                   >> 422                 printk(KERN_WARNING "This processor doesn't support highmem."
                                                   >> 423                        " %ldk highmem ignored\n",
                                                   >> 424                        (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                                                   >> 425                 max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                                                   >> 426 
                                                   >> 427                 max_mapnr = max_low_pfn;
                                                   >> 428         } else if (highend_pfn) {
                                                   >> 429                 max_mapnr = highend_pfn;
                                                   >> 430         } else {
                                                   >> 431                 max_mapnr = max_low_pfn;
                                                   >> 432         }
                                                   >> 433 #else
                                                   >> 434         max_mapnr = max_low_pfn;
                                                   >> 435 #endif
                                                   >> 436         high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
123                                                   437 
124         return IS_ERR(vma) ? PTR_ERR(vma) : 0; !! 438         free_area_init(max_zone_pfns);
125 }                                                 439 }
126                                                   440 
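The max_zone_pfns[] entries above are exclusive upper bounds: each holds the first PFN beyond its zone, and free_area_init() derives zone sizes from the differences. A worked example (numbers illustrative):

        /*
         * 32-bit system, 4 KiB pages, 512 MiB of low memory:
         *   max_low_pfn                = 0x20000
         *   max_zone_pfns[ZONE_NORMAL] = 0x20000  (first PFN past the zone)
         *   high_memory                = __va(0x20000 << PAGE_SHIFT)
         *                              = __va(0x20000000)
         */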
127 const char *arch_vma_name(struct vm_area_struct *vma) !! 441 #ifdef CONFIG_64BIT
                                                   >> 442 static struct kcore_list kcore_kseg0;
                                                   >> 443 #endif
                                                   >> 444 
                                                   >> 445 static inline void __init mem_init_free_highmem(void)
128 {                                                 446 {
129         return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL; !! 447 #ifdef CONFIG_HIGHMEM
                                                   >> 448         unsigned long tmp;
                                                   >> 449 
                                                   >> 450         if (cpu_has_dc_aliases)
                                                   >> 451                 return;
                                                   >> 452 
                                                   >> 453         for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                                                   >> 454                 struct page *page = pfn_to_page(tmp);
                                                   >> 455 
                                                   >> 456                 if (!memblock_is_memory(PFN_PHYS(tmp)))
                                                   >> 457                         SetPageReserved(page);
                                                   >> 458                 else
                                                   >> 459                         free_highmem_page(page);
                                                   >> 460         }
                                                   >> 461 #endif
130 }                                                 462 }
131                                                   463 
132 static const pgprot_t protection_map[16] = {   !! 464 void __init mem_init(void)
133         [VM_NONE]                                       = MKP(0, 0, 0), !! 465 {
134         [VM_READ]                                       = MKP(0, 0, 1), !! 466         /*
135         [VM_WRITE]                                      = MKP(0, 1, 0), !! 467          * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
136         [VM_WRITE | VM_READ]                            = MKP(0, 1, 1), !! 468          * bits to hold a full 32b physical address on MIPS32 systems.
137         [VM_EXEC]                                       = MKP(1, 0, 0), !! 469          */
138         [VM_EXEC | VM_READ]                             = MKP(1, 0, 1), !! 470         BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
139         [VM_EXEC | VM_WRITE]                            = MKP(1, 1, 0), !! 471 
140         [VM_EXEC | VM_WRITE | VM_READ]                  = MKP(1, 1, 1), !! 472         maar_init();
141         [VM_SHARED]                                     = MKP(0, 0, 0), !! 473         memblock_free_all();
142         [VM_SHARED | VM_READ]                           = MKP(0, 0, 1), !! 474         setup_zero_pages();     /* Setup zeroed pages.  */
143         [VM_SHARED | VM_WRITE]                          = MKP(0, 1, 0), !! 475         mem_init_free_highmem();
144         [VM_SHARED | VM_WRITE | VM_READ]                = MKP(0, 1, 1), !! 476 
145         [VM_SHARED | VM_EXEC]                           = MKP(1, 0, 0), !! 477 #ifdef CONFIG_64BIT
146         [VM_SHARED | VM_EXEC | VM_READ]                 = MKP(1, 0, 1), !! 478         if ((unsigned long) &_text > (unsigned long) CKSEG0)
147         [VM_SHARED | VM_EXEC | VM_WRITE]                = MKP(1, 1, 0), !! 479                 /* The -4 is a hack so that user tools don't have to handle
148         [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = MKP(1, 1, 1) !! 480                    the overflow.  */
149 };                                             !! 481                 kclist_add(&kcore_kseg0, (void *) CKSEG0,
150 DECLARE_VM_GET_PAGE_PROT                       !! 482                                 0x80000000 - 4, KCORE_TEXT);
                                                   >> 483 #endif
                                                   >> 484 }
                                                   >> 485 #endif /* !CONFIG_NUMA */
                                                   >> 486 
                                                   >> 487 void free_init_pages(const char *what, unsigned long begin, unsigned long end)
                                                   >> 488 {
                                                   >> 489         unsigned long pfn;
                                                   >> 490 
                                                   >> 491         for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                                                   >> 492                 struct page *page = pfn_to_page(pfn);
                                                   >> 493                 void *addr = phys_to_virt(PFN_PHYS(pfn));
                                                   >> 494 
                                                   >> 495                 memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                                                   >> 496                 free_reserved_page(page);
                                                   >> 497         }
                                                   >> 498         printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
                                                   >> 499 }
                                                   >> 500 
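free_init_pages() rounds inward so that only pages lying wholly inside [begin, end) are freed: PFN_UP() rounds the start up, PFN_DOWN() rounds the end down. A worked example with 4 KiB pages (addresses illustrative):

        /*
         * begin = 0x80101234, end = 0x80105000:
         *   PFN_UP(begin)  = 0x80102  (first fully contained page)
         *   PFN_DOWN(end)  = 0x80105  (first page past the range)
         * so PFNs 0x80102..0x80104 are filled with POISON_FREE_INITMEM (0xcc)
         * and handed back to the page allocator.
         */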
                                                   >> 501 void (*free_init_pages_eva)(void *begin, void *end) = NULL;
                                                   >> 502 
                                                   >> 503 void __weak __init prom_free_prom_memory(void)
                                                   >> 504 {
                                                   >> 505         /* nothing to do */
                                                   >> 506 }
                                                   >> 507 
                                                   >> 508 void __ref free_initmem(void)
                                                   >> 509 {
                                                   >> 510         prom_free_prom_memory();
                                                   >> 511         /*
                                                   >> 512          * Let the platform define a specific function to free the
                                                   >> 513          * init section since EVA may have used any possible mapping
                                                   >> 514          * between virtual and physical addresses.
                                                   >> 515          */
                                                   >> 516         if (free_init_pages_eva)
                                                   >> 517                 free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
                                                   >> 518         else
                                                   >> 519                 free_initmem_default(POISON_FREE_INITMEM);
                                                   >> 520 }
                                                   >> 521 
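The free_init_pages_eva hook exists because under EVA the init section's virtual-to-physical mapping need not be the usual KSEG0 one, so free_initmem_default() could compute the wrong physical range. A hypothetical platform registration (the helper name is invented for illustration):

        /* Hypothetical: the platform translates the init-section bounds
         * itself, then reuses free_init_pages() on physical addresses. */
        static void my_board_free_init_pages(void *begin, void *end)
        {
                free_init_pages("unused kernel", __pa_symbol(begin),
                                __pa_symbol(end));
        }

        /* ...during platform setup, before free_initmem() runs: */
        free_init_pages_eva = my_board_free_init_pages;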
                                                   >> 522 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
                                                   >> 523 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
                                                   >> 524 EXPORT_SYMBOL(__per_cpu_offset);
                                                   >> 525 
                                                   >> 526 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
                                                   >> 527 {
                                                   >> 528         return node_distance(cpu_to_node(from), cpu_to_node(to));
                                                   >> 529 }
                                                   >> 530 
                                                   >> 531 static int __init pcpu_cpu_to_node(int cpu)
                                                   >> 532 {
                                                   >> 533         return cpu_to_node(cpu);
                                                   >> 534 }
                                                   >> 535 
                                                   >> 536 void __init setup_per_cpu_areas(void)
                                                   >> 537 {
                                                   >> 538         unsigned long delta;
                                                   >> 539         unsigned int cpu;
                                                   >> 540         int rc;
                                                   >> 541 
                                                   >> 542         /*
                                                   >> 543          * Always reserve area for module percpu variables.  That's
                                                   >> 544          * what the legacy allocator did.
                                                   >> 545          */
                                                   >> 546         rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                                                   >> 547                                     PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
                                                   >> 548                                     pcpu_cpu_distance,
                                                   >> 549                                     pcpu_cpu_to_node);
                                                   >> 550         if (rc < 0)
                                                   >> 551                 panic("Failed to initialize percpu areas.");
                                                   >> 552 
                                                   >> 553         delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
                                                   >> 554         for_each_possible_cpu(cpu)
                                                   >> 555                 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
                                                   >> 556 }
                                                   >> 557 #endif
                                                   >> 558 
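After setup_per_cpu_areas(), a per-CPU variable is reached by adding the owning CPU's offset to the variable's link-time address. Conceptually (my_per_cpu_ptr is a hypothetical name; the real accessors are per_cpu() and per_cpu_ptr()):

        #define my_per_cpu_ptr(var, cpu) \
                ((typeof(&(var)))((unsigned long)&(var) + __per_cpu_offset[cpu]))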
                                                   >> 559 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
                                                   >> 560 unsigned long pgd_current[NR_CPUS];
                                                   >> 561 #endif
                                                   >> 562 
                                                   >> 563 /*
                                                   >> 564  * Align swapper_pg_dir in to 64K, allows its address to be loaded
                                                   >> 565  * with a single LUI instruction in the TLB handlers.  If we used
                                                   >> 566  * __aligned(64K), its size would get rounded up to the alignment
                                                   >> 567  * size, and waste space.  So we place it in its own section and align
                                                   >> 568  * it in the linker script.
                                                   >> 569  */
                                                   >> 570 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
                                                   >> 571 #ifndef __PAGETABLE_PUD_FOLDED
                                                   >> 572 pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
                                                   >> 573 #endif
                                                   >> 574 #ifndef __PAGETABLE_PMD_FOLDED
                                                   >> 575 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
                                                   >> 576 EXPORT_SYMBOL_GPL(invalid_pmd_table);
                                                   >> 577 #endif
                                                   >> 578 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
                                                   >> 579 EXPORT_SYMBOL(invalid_pte_table);
151                                                   580 
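To make the alignment comment above concrete: with swapper_pg_dir aligned to 64 KiB its low 16 bits are zero, so the TLB refill handler can form the pointer in one instruction (illustrative assembly):

        /*
         *      lui   k1, %hi(swapper_pg_dir)   # enough: %lo(...) == 0
         *
         * versus the general two-instruction sequence for an arbitrarily
         * placed symbol:
         *
         *      lui   k1, %hi(swapper_pg_dir)
         *      addiu k1, k1, %lo(swapper_pg_dir)
         */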
152 #ifdef CONFIG_EXECMEM                             581 #ifdef CONFIG_EXECMEM
                                                   >> 582 #ifdef MODULES_VADDR
153 static struct execmem_info execmem_info __ro_a    583 static struct execmem_info execmem_info __ro_after_init;
154                                                   584 
155 struct execmem_info __init *execmem_arch_setup    585 struct execmem_info __init *execmem_arch_setup(void)
156 {                                                 586 {
157         execmem_info = (struct execmem_info){     587         execmem_info = (struct execmem_info){
158                 .ranges = {                       588                 .ranges = {
159                         [EXECMEM_DEFAULT] = {     589                         [EXECMEM_DEFAULT] = {
160                                 .start  = MODU    590                                 .start  = MODULES_VADDR,
161                                 .end    = MODU    591                                 .end    = MODULES_END,
162                                 .pgprot = PAGE_KERNEL_EXEC, !! 592                                 .pgprot = PAGE_KERNEL,
163                                 .alignment = 1    593                                 .alignment = 1,
164                         },                        594                         },
165                 },                                595                 },
166         };                                        596         };
167                                                   597 
168         return &execmem_info;                     598         return &execmem_info;
169 }                                                 599 }
                                                   >> 600 #endif
170 #endif /* CONFIG_EXECMEM */                       601 #endif /* CONFIG_EXECMEM */
171                                                   602 
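For reference, a hedged sketch of how the range registered above is consumed: module text goes through the execmem API, and any allocation type without its own entry falls back to EXECMEM_DEFAULT (grab_module_text is a hypothetical helper):

        #include <linux/execmem.h>

        static void *grab_module_text(size_t size)
        {
                /* Falls back to the [MODULES_VADDR, MODULES_END) range
                 * declared above; release with execmem_free(). */
                return execmem_alloc(EXECMEM_MODULE_TEXT, size);
        }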
