Newer version of the file (left-hand column of the comparison):

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/pgalloc.h>

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *init, *ret = NULL;
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM,
						PGD_TABLE_ORDER);

	if (ptdesc) {
		ret = ptdesc_address(ptdesc);
		init = pgd_offset(&init_mm, 0UL);
		pgd_init(ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(pgd_alloc);

Older version, linux/arch/i386/mm/pgtable.c (right-hand column of the comparison):

/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat->node_mem_map + i;
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%d pages of RAM\n", total);
	printk("%d pages of HIGHMEM\n", highmem);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk ("set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk ("set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk ("set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pmd = pmd_offset(pgd, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);
	return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (pte)
		clear_highpage(pte);
	return pte;
}

void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
spinlock_t pgd_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(pgd_list);

void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	list_add(&virt_to_page(pgd)->lru, &pgd_list);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	list_del(&virt_to_page(pgd)->lru);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
		set_pgd(&pgd[i], __pgd(1 + __pa((u64)((u32)pmd))));
	}
	return pgd;

out_oom:
	for (i--; i >= 0; i--)
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}

void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}
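A note for readers comparing the two pgd_alloc() implementations: in the PAE branch of the older version, each per-process pmd page is stored directly into a pgd entry as its physical address plus 1, where the added 1 sets the low (Present) bit, and pgd_free() recovers the pmd's virtual address by subtracting that 1 before __va(). The sketch below is a simplified restatement of that encode/decode pair; the helper names are illustrative only and are not kernel APIs.

/*
 * Illustrative sketch only -- these helpers do not exist in the kernel;
 * they restate the pmd <-> pgd-entry encoding used by the old PAE
 * pgd_alloc()/pgd_free() above.
 */
static inline pgd_t pmd_page_to_pgd_entry(pmd_t *pmd)
{
	/* physical address of the (page-aligned) pmd page, low bit set as Present */
	return __pgd(1 + __pa(pmd));
}

static inline pmd_t *pgd_entry_to_pmd_page(pgd_t pgd)
{
	/* strip the Present bit, then map the physical address back to virtual */
	return (pmd_t *)__va(pgd_val(pgd) - 1);
}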