
TOMOYO Linux Cross Reference
Linux/arch/mips/mm/hugetlbpage.c


Diff markup

Differences between /arch/mips/mm/hugetlbpage.c (Version linux-6.11-rc3) and /arch/sparc64/mm/hugetlbpage.c (Version linux-2.6.0)
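
Markup: "!!" marks a line that differs between the two versions, "<<" a line present only in the left (linux-6.11-rc3 MIPS) file, and ">>" a line present only in the right (linux-2.6.0 sparc64) file.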


  1 /*                                                  1 /*
  2  * MIPS Huge TLB Page Support for Kernel.      !!   2  * SPARC64 Huge TLB page support.
  3  *                                                  3  *
  4  * This file is subject to the terms and conditions of the GNU General Public !!   4  * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
  5  * License.  See the file "COPYING" in the main directory of this archive <<
  6  * for more details.                           << 
  7  *                                             << 
  8  * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> <<
  9  * Copyright 2005, Embedded Alley Solutions, Inc. <<
 10  * Matt Porter <mporter@embeddedalley.com>     <<
 11  * Copyright (C) 2008, 2009 Cavium Networks, Inc. <<
 12  */                                                 5  */
 13                                                     6 
                                                   >>   7 #include <linux/config.h>
                                                   >>   8 #include <linux/init.h>
 14 #include <linux/fs.h>                               9 #include <linux/fs.h>
 15 #include <linux/mm.h>                              10 #include <linux/mm.h>
 16 #include <linux/hugetlb.h>                         11 #include <linux/hugetlb.h>
 17 #include <linux/pagemap.h>                         12 #include <linux/pagemap.h>
 18 #include <linux/err.h>                         !!  13 #include <linux/smp_lock.h>
                                                   >>  14 #include <linux/slab.h>
 19 #include <linux/sysctl.h>                          15 #include <linux/sysctl.h>
                                                   >>  16 
 20 #include <asm/mman.h>                              17 #include <asm/mman.h>
                                                   >>  18 #include <asm/pgalloc.h>
 21 #include <asm/tlb.h>                               19 #include <asm/tlb.h>
 22 #include <asm/tlbflush.h>                          20 #include <asm/tlbflush.h>
                                                   >>  21 #include <asm/cacheflush.h>
                                                   >>  22 
                                                   >>  23 static long     htlbpagemem;
                                                   >>  24 int             htlbpage_max;
                                                   >>  25 static long     htlbzone_pages;
                                                   >>  26 
                                                   >>  27 static struct list_head hugepage_freelists[MAX_NUMNODES];
                                                   >>  28 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
                                                   >>  29 
                                                   >>  30 static void enqueue_huge_page(struct page *page)
                                                   >>  31 {
                                                   >>  32         list_add(&page->list,
                                                   >>  33                  &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
                                                   >>  34 }
 23                                                    35 
 24 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, !!  36 static struct page *dequeue_huge_page(void)
 25                       unsigned long addr, unsigned long sz) !!  37 {
                                                   >>  38         int nid = numa_node_id();
                                                   >>  39         struct page *page = NULL;
                                                   >>  40 
                                                   >>  41         if (list_empty(&hugepage_freelists[nid])) {
                                                   >>  42                 for (nid = 0; nid < MAX_NUMNODES; ++nid)
                                                   >>  43                         if (!list_empty(&hugepage_freelists[nid]))
                                                   >>  44                                 break;
                                                   >>  45         }
                                                   >>  46         if (nid >= 0 && nid < MAX_NUMNODES &&
                                                   >>  47             !list_empty(&hugepage_freelists[nid])) {
                                                   >>  48                 page = list_entry(hugepage_freelists[nid].next,
                                                   >>  49                                   struct page, list);
                                                   >>  50                 list_del(&page->list);
                                                   >>  51         }
                                                   >>  52         return page;
                                                   >>  53 }
                                                   >>  54 
                                                   >>  55 static struct page *alloc_fresh_huge_page(void)
                                                   >>  56 {
                                                   >>  57         static int nid = 0;
                                                   >>  58         struct page *page;
                                                   >>  59         page = alloc_pages_node(nid, GFP_HIGHUSER, HUGETLB_PAGE_ORDER);
                                                   >>  60         nid = (nid + 1) % numnodes;
                                                   >>  61         return page;
                                                   >>  62 }
                                                   >>  63 
                                                   >>  64 static void free_huge_page(struct page *page);
                                                   >>  65 
                                                   >>  66 static struct page *alloc_hugetlb_page(void)
                                                   >>  67 {
                                                   >>  68         struct page *page;
                                                   >>  69 
                                                   >>  70         spin_lock(&htlbpage_lock);
                                                   >>  71         page = dequeue_huge_page();
                                                   >>  72         if (!page) {
                                                   >>  73                 spin_unlock(&htlbpage_lock);
                                                   >>  74                 return NULL;
                                                   >>  75         }
                                                   >>  76         htlbpagemem--;
                                                   >>  77         spin_unlock(&htlbpage_lock);
                                                   >>  78         set_page_count(page, 1);
                                                   >>  79         page->lru.prev = (void *)free_huge_page;
                                                   >>  80         memset(page_address(page), 0, HPAGE_SIZE);
                                                   >>  81         return page;
                                                   >>  82 }
                                                   >>  83 
                                                   >>  84 static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 26 {                                                  85 {
 27         pgd_t *pgd;                                86         pgd_t *pgd;
 28         p4d_t *p4d;                            !!  87         pmd_t *pmd;
 29         pud_t *pud;                            << 
 30         pte_t *pte = NULL;                         88         pte_t *pte = NULL;
 31                                                    89 
 32         pgd = pgd_offset(mm, addr);                90         pgd = pgd_offset(mm, addr);
 33         p4d = p4d_alloc(mm, pgd, addr);        !!  91         if (pgd) {
 34         pud = pud_alloc(mm, p4d, addr);        !!  92                 pmd = pmd_alloc(mm, pgd, addr);
 35         if (pud)                               !!  93                 if (pmd)
 36                 pte = (pte_t *)pmd_alloc(mm, pud, addr); !!  94                         pte = pte_alloc_map(mm, pmd, addr);
 37                                                !!  95         }
 38         return pte;                                96         return pte;
 39 }                                                  97 }
 40                                                    98 
 41 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, !!  99 static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 42                        unsigned long sz)       << 
 43 {                                                 100 {
 44         pgd_t *pgd;                               101         pgd_t *pgd;
 45         p4d_t *p4d;                            !! 102         pmd_t *pmd;
 46         pud_t *pud;                            !! 103         pte_t *pte = NULL;
 47         pmd_t *pmd = NULL;                     << 
 48                                                   104 
 49         pgd = pgd_offset(mm, addr);               105         pgd = pgd_offset(mm, addr);
 50         if (pgd_present(*pgd)) {               !! 106         if (pgd) {
 51                 p4d = p4d_offset(pgd, addr);   !! 107                 pmd = pmd_offset(pgd, addr);
 52                 if (p4d_present(*p4d)) {       !! 108                 if (pmd)
 53                         pud = pud_offset(p4d, addr); !! 109                         pte = pte_offset_map(pmd, addr);
 54                         if (pud_present(*pud)) !! 110         }
 55                                 pmd = pmd_offset(pud, addr); !! 111         return pte;
                                                   >> 112 }
                                                   >> 113 
                                                   >> 114 #define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
                                                   >> 115 
                                                   >> 116 static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                                                   >> 117                          struct page *page, pte_t * page_table, int write_access)
                                                   >> 118 {
                                                   >> 119         unsigned long i;
                                                   >> 120         pte_t entry;
                                                   >> 121 
                                                   >> 122         mm->rss += (HPAGE_SIZE / PAGE_SIZE);
                                                   >> 123 
                                                   >> 124         if (write_access)
                                                   >> 125                 entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
                                                   >> 126                                                        vma->vm_page_prot)));
                                                   >> 127         else
                                                   >> 128                 entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
                                                   >> 129         entry = pte_mkyoung(entry);
                                                   >> 130         mk_pte_huge(entry);
                                                   >> 131 
                                                   >> 132         for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                                                   >> 133                 set_pte(page_table, entry);
                                                   >> 134                 page_table++;
                                                   >> 135 
                                                   >> 136                 pte_val(entry) += PAGE_SIZE;
                                                   >> 137         }
                                                   >> 138 }
                                                   >> 139 
                                                   >> 140 /*
                                                   >> 141  * This function checks for proper alignment of input addr and len parameters.
                                                   >> 142  */
                                                   >> 143 int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
                                                   >> 144 {
                                                   >> 145         if (len & ~HPAGE_MASK)
                                                   >> 146                 return -EINVAL;
                                                   >> 147         if (addr & ~HPAGE_MASK)
                                                   >> 148                 return -EINVAL;
                                                   >> 149         return 0;
                                                   >> 150 }
                                                   >> 151 
                                                   >> 152 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                                                   >> 153                             struct vm_area_struct *vma)
                                                   >> 154 {
                                                   >> 155         pte_t *src_pte, *dst_pte, entry;
                                                   >> 156         struct page *ptepage;
                                                   >> 157         unsigned long addr = vma->vm_start;
                                                   >> 158         unsigned long end = vma->vm_end;
                                                   >> 159         int i;
                                                   >> 160 
                                                   >> 161         while (addr < end) {
                                                   >> 162                 dst_pte = huge_pte_alloc(dst, addr);
                                                   >> 163                 if (!dst_pte)
                                                   >> 164                         goto nomem;
                                                   >> 165                 src_pte = huge_pte_offset(src, addr);
                                                   >> 166                 BUG_ON(!src_pte || pte_none(*src_pte));
                                                   >> 167                 entry = *src_pte;
                                                   >> 168                 ptepage = pte_page(entry);
                                                   >> 169                 get_page(ptepage);
                                                   >> 170                 for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                                                   >> 171                         set_pte(dst_pte, entry);
                                                   >> 172                         pte_val(entry) += PAGE_SIZE;
                                                   >> 173                         dst_pte++;
                                                   >> 174                 }
                                                   >> 175                 dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                                                   >> 176                 addr += HPAGE_SIZE;
                                                   >> 177         }
                                                   >> 178         return 0;
                                                   >> 179 
                                                   >> 180 nomem:
                                                   >> 181         return -ENOMEM;
                                                   >> 182 }
                                                   >> 183 
                                                   >> 184 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                                   >> 185                         struct page **pages, struct vm_area_struct **vmas,
                                                   >> 186                         unsigned long *position, int *length, int i)
                                                   >> 187 {
                                                   >> 188         unsigned long vaddr = *position;
                                                   >> 189         int remainder = *length;
                                                   >> 190 
                                                   >> 191         WARN_ON(!is_vm_hugetlb_page(vma));
                                                   >> 192 
                                                   >> 193         while (vaddr < vma->vm_end && remainder) {
                                                   >> 194                 if (pages) {
                                                   >> 195                         pte_t *pte;
                                                   >> 196                         struct page *page;
                                                   >> 197 
                                                   >> 198                         pte = huge_pte_offset(mm, vaddr);
                                                   >> 199 
                                                   >> 200                         /* hugetlb should be locked, and hence, prefaulted */
                                                   >> 201                         BUG_ON(!pte || pte_none(*pte));
                                                   >> 202 
                                                   >> 203                         page = pte_page(*pte);
                                                   >> 204 
                                                   >> 205                         WARN_ON(!PageCompound(page));
                                                   >> 206 
                                                   >> 207                         get_page(page);
                                                   >> 208                         pages[i] = page;
                                                   >> 209                 }
                                                   >> 210 
                                                   >> 211                 if (vmas)
                                                   >> 212                         vmas[i] = vma;
                                                   >> 213 
                                                   >> 214                 vaddr += PAGE_SIZE;
                                                   >> 215                 --remainder;
                                                   >> 216                 ++i;
                                                   >> 217         }
                                                   >> 218 
                                                   >> 219         *length = remainder;
                                                   >> 220         *position = vaddr;
                                                   >> 221 
                                                   >> 222         return i;
                                                   >> 223 }
                                                   >> 224 
                                                   >> 225 struct page *follow_huge_addr(struct mm_struct *mm,
                                                   >> 226                               struct vm_area_struct *vma,
                                                   >> 227                               unsigned long address, int write)
                                                   >> 228 {
                                                   >> 229         return NULL;
                                                   >> 230 }
                                                   >> 231 
                                                   >> 232 struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
                                                   >> 233 {
                                                   >> 234         return NULL;
                                                   >> 235 }
                                                   >> 236 
                                                   >> 237 int pmd_huge(pmd_t pmd)
                                                   >> 238 {
                                                   >> 239         return 0;
                                                   >> 240 }
                                                   >> 241 
                                                   >> 242 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                                   >> 243                              pmd_t *pmd, int write)
                                                   >> 244 {
                                                   >> 245         return NULL;
                                                   >> 246 }
                                                   >> 247 
                                                   >> 248 static void free_huge_page(struct page *page)
                                                   >> 249 {
                                                   >> 250         BUG_ON(page_count(page));
                                                   >> 251         BUG_ON(page->mapping);
                                                   >> 252 
                                                   >> 253         INIT_LIST_HEAD(&page->list);
                                                   >> 254 
                                                   >> 255         spin_lock(&htlbpage_lock);
                                                   >> 256         enqueue_huge_page(page);
                                                   >> 257         htlbpagemem++;
                                                   >> 258         spin_unlock(&htlbpage_lock);
                                                   >> 259 }
                                                   >> 260 
                                                   >> 261 void huge_page_release(struct page *page)
                                                   >> 262 {
                                                   >> 263         if (!put_page_testzero(page))
                                                   >> 264                 return;
                                                   >> 265 
                                                   >> 266         free_huge_page(page);
                                                   >> 267 }
                                                   >> 268 
                                                   >> 269 void unmap_hugepage_range(struct vm_area_struct *vma,
                                                   >> 270                           unsigned long start, unsigned long end)
                                                   >> 271 {
                                                   >> 272         struct mm_struct *mm = vma->vm_mm;
                                                   >> 273         unsigned long address;
                                                   >> 274         pte_t *pte;
                                                   >> 275         struct page *page;
                                                   >> 276         int i;
                                                   >> 277 
                                                   >> 278         BUG_ON(start & (HPAGE_SIZE - 1));
                                                   >> 279         BUG_ON(end & (HPAGE_SIZE - 1));
                                                   >> 280 
                                                   >> 281         for (address = start; address < end; address += HPAGE_SIZE) {
                                                   >> 282                 pte = huge_pte_offset(mm, address);
                                                   >> 283                 BUG_ON(!pte);
                                                   >> 284                 if (pte_none(*pte))
                                                   >> 285                         continue;
                                                   >> 286                 page = pte_page(*pte);
                                                   >> 287                 huge_page_release(page);
                                                   >> 288                 for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                                                   >> 289                         pte_clear(pte);
                                                   >> 290                         pte++;
 56                 }                                 291                 }
 57         }                                         292         }
 58         return (pte_t *) pmd;                  !! 293         mm->rss -= (end - start) >> PAGE_SHIFT;
                                                   >> 294         flush_tlb_range(vma, start, end);
 59 }                                                 295 }
                                                   >> 296 
                                                   >> 297 void zap_hugepage_range(struct vm_area_struct *vma,
                                                   >> 298                         unsigned long start, unsigned long length)
                                                   >> 299 {
                                                   >> 300         struct mm_struct *mm = vma->vm_mm;
                                                   >> 301 
                                                   >> 302         spin_lock(&mm->page_table_lock);
                                                   >> 303         unmap_hugepage_range(vma, start, start + length);
                                                   >> 304         spin_unlock(&mm->page_table_lock);
                                                   >> 305 }
                                                   >> 306 
                                                   >> 307 int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
                                                   >> 308 {
                                                   >> 309         struct mm_struct *mm = current->mm;
                                                   >> 310         unsigned long addr;
                                                   >> 311         int ret = 0;
                                                   >> 312 
                                                   >> 313         BUG_ON(vma->vm_start & ~HPAGE_MASK);
                                                   >> 314         BUG_ON(vma->vm_end & ~HPAGE_MASK);
                                                   >> 315 
                                                   >> 316         spin_lock(&mm->page_table_lock);
                                                   >> 317         for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                                                   >> 318                 unsigned long idx;
                                                   >> 319                 pte_t *pte = huge_pte_alloc(mm, addr);
                                                   >> 320                 struct page *page;
                                                   >> 321 
                                                   >> 322                 if (!pte) {
                                                   >> 323                         ret = -ENOMEM;
                                                   >> 324                         goto out;
                                                   >> 325                 }
                                                   >> 326                 if (!pte_none(*pte))
                                                   >> 327                         continue;
                                                   >> 328 
                                                   >> 329                 idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                                                   >> 330                         + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                                                   >> 331                 page = find_get_page(mapping, idx);
                                                   >> 332                 if (!page) {
                                                   >> 333                         /* charge the fs quota first */
                                                   >> 334                         if (hugetlb_get_quota(mapping)) {
                                                   >> 335                                 ret = -ENOMEM;
                                                   >> 336                                 goto out;
                                                   >> 337                         }
                                                   >> 338                         page = alloc_hugetlb_page();
                                                   >> 339                         if (!page) {
                                                   >> 340                                 hugetlb_put_quota(mapping);
                                                   >> 341                                 ret = -ENOMEM;
                                                   >> 342                                 goto out;
                                                   >> 343                         }
                                                   >> 344                         ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                                                   >> 345                         unlock_page(page);
                                                   >> 346                         if (ret) {
                                                   >> 347                                 hugetlb_put_quota(mapping);
                                                   >> 348                                 free_huge_page(page);
                                                   >> 349                                 goto out;
                                                   >> 350                         }
                                                   >> 351                 }
                                                   >> 352                 set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
                                                   >> 353         }
                                                   >> 354 out:
                                                   >> 355         spin_unlock(&mm->page_table_lock);
                                                   >> 356         return ret;
                                                   >> 357 }
                                                   >> 358 
                                                   >> 359 static void update_and_free_page(struct page *page)
                                                   >> 360 {
                                                   >> 361         int j;
                                                   >> 362         struct page *map;
                                                   >> 363 
                                                   >> 364         map = page;
                                                   >> 365         htlbzone_pages--;
                                                   >> 366         for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
                                                   >> 367                 map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                                   >> 368                                 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                                   >> 369                                 1 << PG_private | 1<< PG_writeback);
                                                   >> 370                 set_page_count(map, 0);
                                                   >> 371                 map++;
                                                   >> 372         }
                                                   >> 373         set_page_count(page, 1);
                                                   >> 374         __free_pages(page, HUGETLB_PAGE_ORDER);
                                                   >> 375 }
                                                   >> 376 
                                                   >> 377 static int try_to_free_low(int count)
                                                   >> 378 {
                                                   >> 379         struct list_head *p;
                                                   >> 380         struct page *page, *map;
                                                   >> 381 
                                                   >> 382         map = NULL;
                                                   >> 383         spin_lock(&htlbpage_lock);
                                                   >> 384         /* all lowmem is on node 0 */
                                                   >> 385         list_for_each(p, &hugepage_freelists[0]) {
                                                   >> 386                 if (map) {
                                                   >> 387                         list_del(&map->list);
                                                   >> 388                         update_and_free_page(map);
                                                   >> 389                         htlbpagemem--;
                                                   >> 390                         map = NULL;
                                                   >> 391                         if (++count == 0)
                                                   >> 392                                 break;
                                                   >> 393                 }
                                                   >> 394                 page = list_entry(p, struct page, list);
                                                   >> 395                 if (!PageHighMem(page))
                                                   >> 396                         map = page;
                                                   >> 397         }
                                                   >> 398         if (map) {
                                                   >> 399                 list_del(&map->list);
                                                   >> 400                 update_and_free_page(map);
                                                   >> 401                 htlbpagemem--;
                                                   >> 402                 count++;
                                                   >> 403         }
                                                   >> 404         spin_unlock(&htlbpage_lock);
                                                   >> 405         return count;
                                                   >> 406 }
                                                   >> 407 
                                                   >> 408 static int set_hugetlb_mem_size(int count)
                                                   >> 409 {
                                                   >> 410         int lcount;
                                                   >> 411         struct page *page;
                                                   >> 412 
                                                   >> 413         if (count < 0)
                                                   >> 414                 lcount = count;
                                                   >> 415         else
                                                   >> 416                 lcount = count - htlbzone_pages;
                                                   >> 417 
                                                   >> 418         if (lcount == 0)
                                                   >> 419                 return (int)htlbzone_pages;
                                                   >> 420         if (lcount > 0) {       /* Increase the mem size. */
                                                   >> 421                 while (lcount--) {
                                                   >> 422                         page = alloc_fresh_huge_page();
                                                   >> 423                         if (page == NULL)
                                                   >> 424                                 break;
                                                   >> 425                         spin_lock(&htlbpage_lock);
                                                   >> 426                         enqueue_huge_page(page);
                                                   >> 427                         htlbpagemem++;
                                                   >> 428                         htlbzone_pages++;
                                                   >> 429                         spin_unlock(&htlbpage_lock);
                                                   >> 430                 }
                                                   >> 431                 return (int) htlbzone_pages;
                                                   >> 432         }
                                                   >> 433         /* Shrink the memory size. */
                                                   >> 434         lcount = try_to_free_low(lcount);
                                                   >> 435         while (lcount++) {
                                                   >> 436                 page = alloc_hugetlb_page();
                                                   >> 437                 if (page == NULL)
                                                   >> 438                         break;
                                                   >> 439                 spin_lock(&htlbpage_lock);
                                                   >> 440                 update_and_free_page(page);
                                                   >> 441                 spin_unlock(&htlbpage_lock);
                                                   >> 442         }
                                                   >> 443         return (int) htlbzone_pages;
                                                   >> 444 }
                                                   >> 445 
                                                   >> 446 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                                                   >> 447                            struct file *file, void *buffer, size_t *length)
                                                   >> 448 {
                                                   >> 449         proc_dointvec(table, write, file, buffer, length);
                                                   >> 450         htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
                                                   >> 451         return 0;
                                                   >> 452 }
                                                   >> 453 
                                                   >> 454 static int __init hugetlb_setup(char *s)
                                                   >> 455 {
                                                   >> 456         if (sscanf(s, "%d", &htlbpage_max) <= 0)
                                                   >> 457                 htlbpage_max = 0;
                                                   >> 458         return 1;
                                                   >> 459 }
                                                   >> 460 __setup("hugepages=", hugetlb_setup);
                                                   >> 461 
                                                   >> 462 static int __init hugetlb_init(void)
                                                   >> 463 {
                                                   >> 464         int i;
                                                   >> 465         struct page *page;
                                                   >> 466 
                                                   >> 467         for (i = 0; i < MAX_NUMNODES; ++i)
                                                   >> 468                 INIT_LIST_HEAD(&hugepage_freelists[i]);
                                                   >> 469 
                                                   >> 470         for (i = 0; i < htlbpage_max; ++i) {
                                                   >> 471                 page = alloc_fresh_huge_page();
                                                   >> 472                 if (!page)
                                                   >> 473                         break;
                                                   >> 474                 spin_lock(&htlbpage_lock);
                                                   >> 475                 enqueue_huge_page(page);
                                                   >> 476                 spin_unlock(&htlbpage_lock);
                                                   >> 477         }
                                                   >> 478         htlbpage_max = htlbpagemem = htlbzone_pages = i;
                                                   >> 479         printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
                                                   >> 480         return 0;
                                                   >> 481 }
                                                   >> 482 module_init(hugetlb_init);
                                                   >> 483 
                                                   >> 484 int hugetlb_report_meminfo(char *buf)
                                                   >> 485 {
                                                   >> 486         return sprintf(buf,
                                                   >> 487                         "HugePages_Total: %5lu\n"
                                                   >> 488                         "HugePages_Free:  %5lu\n"
                                                   >> 489                         "Hugepagesize:    %5lu kB\n",
                                                   >> 490                         htlbzone_pages,
                                                   >> 491                         htlbpagemem,
                                                   >> 492                         HPAGE_SIZE/1024);
                                                   >> 493 }
                                                   >> 494 
                                                   >> 495 int is_hugepage_mem_enough(size_t size)
                                                   >> 496 {
                                                   >> 497         return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpagemem;
                                                   >> 498 }
                                                   >> 499 
                                                   >> 500 /*
                                                   >> 501  * We cannot handle pagefaults against hugetlb pages at all.  They cause
                                                   >> 502  * handle_mm_fault() to try to instantiate regular-sized pages in the
                                                   >> 503  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
                                                   >> 504  * this far.
                                                   >> 505  */
                                                   >> 506 static struct page *hugetlb_nopage(struct vm_area_struct *vma,
                                                   >> 507                                    unsigned long address, int unused)
                                                   >> 508 {
                                                   >> 509         BUG();
                                                   >> 510         return NULL;
                                                   >> 511 }
                                                   >> 512 
                                                   >> 513 struct vm_operations_struct hugetlb_vm_ops = {
                                                   >> 514         .nopage = hugetlb_nopage,
                                                   >> 515 };
 60                                                   516 
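
The structural change the diff highlights: the modern MIPS code installs a single entry for each huge page at PMD level of the pgd -> p4d -> pud -> pmd walk (huge_pte_alloc() above returns the pmd cast to a pte_t *), while the 2.6.0 sparc64 code emulated a huge page by replicating an ordinary PTE into every base-page slot it spans (the loops in set_huge_pte() and copy_hugetlb_page_range()). For orientation, a minimal userspace sketch of requesting such a mapping on a current kernel. MAP_HUGETLB is the modern interface and did not exist in 2.6.0, where hugetlb mappings came from files on a hugetlbfs mount; the 2 MB length is an assumption, so check Hugepagesize in /proc/meminfo on the target system.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Assumed 2 MB huge pages; reserve some first, e.g. with the
	 * vm.nr_hugepages sysctl or the hugepages= boot option above. */
	size_t len = 2UL * 1024 * 1024;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* fails if the pool is empty */
		return EXIT_FAILURE;
	}
	memset(p, 0, len);	/* the first touch faults in one huge page */
	munmap(p, len);
	return 0;
}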

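For concreteness, a toy model of that replication loop. The 8 KB base page and 4 MB huge page are assumptions matching sparc64 defaults of that era; with them HUGETLB_PAGE_ORDER is 22 - 13 = 9, so set_huge_pte() writes 1 << 9 = 512 consecutive PTEs, stepping the target address by PAGE_SIZE each time.

#include <stdio.h>

#define PAGE_SHIFT	13	/* 8 KB base pages (assumed) */
#define HPAGE_SHIFT	22	/* 4 MB huge pages (assumed) */
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

int main(void)
{
	unsigned long entry = 0x10000000UL;	/* hypothetical physical base */
	unsigned long start = entry;
	int i;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++)
		entry += 1UL << PAGE_SHIFT;	/* pte_val(entry) += PAGE_SIZE */

	printf("replicated %d PTEs covering %lu bytes\n",
	       1 << HUGETLB_PAGE_ORDER, entry - start);	/* 512, 4194304 */
	return 0;
}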