
TOMOYO Linux Cross Reference
Linux/arch/arm/mm/tlb.c


Diff markup

Differences between /arch/arm/mm/tlb.c and /arch/sparc/mm/tlb.c

The two files play very different roles: the ARM side is a table of per-CPU-model TLB maintenance function pointers (struct cpu_tlb_fns), while the sparc side implements lazy TLB flush batching plus transparent-hugepage accounting and page-table deposit/withdraw helpers.


  1 // SPDX-License-Identifier: GPL-2.0-only                                                 !!   1 // SPDX-License-Identifier: GPL-2.0
  2 // Copyright 2024 Google LLC                                                             !!   2 /* arch/sparc64/mm/tlb.c
  3 // Author: Ard Biesheuvel <ardb@google.com>                                              !!   3  *
                                                                                             >>   4  * Copyright (C) 2004 David S. Miller <davem@redhat.com>
                                                                                             >>   5  */
                                                                                             >>   6 
                                                                                             >>   7 #include <linux/kernel.h>
                                                                                             >>   8 #include <linux/percpu.h>
                                                                                             >>   9 #include <linux/mm.h>
                                                                                             >>  10 #include <linux/swap.h>
                                                                                             >>  11 #include <linux/preempt.h>
                                                                                             >>  12 #include <linux/pagemap.h>
  4                                                                                              13 
  5 #include <linux/types.h>                                                                 << 
  6 #include <asm/tlbflush.h>                                                                    14 #include <asm/tlbflush.h>
  7                                                                                          !!  15 #include <asm/cacheflush.h>
  8 #ifdef CONFIG_CPU_TLB_V4WT                                                               !!  16 #include <asm/mmu_context.h>
  9 void v4_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);     !!  17 #include <asm/tlb.h>
 10 void v4_flush_kern_tlb_range(unsigned long, unsigned long);                              !!  18 
 11                                                                                          !!  19 /* Heavily inspired by the ppc64 code.  */
 12 struct cpu_tlb_fns v4_tlb_fns __initconst = {                                            !!  20 
 13         .flush_user_range       = v4_flush_user_tlb_range,                               !!  21 static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 14         .flush_kern_range       = v4_flush_kern_tlb_range,                               !!  22 
 15         .tlb_flags              = v4_tlb_flags,                                          !!  23 void flush_tlb_pending(void)
 16 };                                                                                       !!  24 {
 17 #endif                                                                                   !!  25         struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 18                                                                                          !!  26         struct mm_struct *mm = tb->mm;
 19 #ifdef CONFIG_CPU_TLB_V4WB                                                               !!  27 
 20 void v4wb_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);   !!  28         if (!tb->tlb_nr)
 21 void v4wb_flush_kern_tlb_range(unsigned long, unsigned long);                            !!  29                 goto out;
 22                                                                                          !!  30 
 23 struct cpu_tlb_fns v4wb_tlb_fns __initconst = {                                          !!  31         flush_tsb_user(tb);
 24         .flush_user_range       = v4wb_flush_user_tlb_range,                             !!  32 
 25         .flush_kern_range       = v4wb_flush_kern_tlb_range,                             !!  33         if (CTX_VALID(mm->context)) {
 26         .tlb_flags              = v4wb_tlb_flags,                                        !!  34                 if (tb->tlb_nr == 1) {
 27 };                                                                                       !!  35                         global_flush_tlb_page(mm, tb->vaddrs[0]);
 28 #endif                                                                                   !!  36                 } else {
 29                                                                                          !!  37 #ifdef CONFIG_SMP
 30 #if defined(CONFIG_CPU_TLB_V4WBI) || defined(CONFIG_CPU_TLB_FEROCEON)                    !!  38                         smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
 31 void v4wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);  !!  39                                               &tb->vaddrs[0]);
 32 void v4wbi_flush_kern_tlb_range(unsigned long, unsigned long);                           !!  40 #else
 33                                                                                          !!  41                         __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
 34 struct cpu_tlb_fns v4wbi_tlb_fns __initconst = {                                         !!  42                                             tb->tlb_nr, &tb->vaddrs[0]);
 35         .flush_user_range       = v4wbi_flush_user_tlb_range,                            << 
 36         .flush_kern_range       = v4wbi_flush_kern_tlb_range,                            << 
 37         .tlb_flags              = v4wbi_tlb_flags,                                       << 
 38 };                                                                                       << 
 39 #endif                                                                                       43 #endif
                                                                                             >>  44                 }
                                                                                             >>  45         }
 40                                                                                              46 
 41 #ifdef CONFIG_CPU_TLB_V6                                                                 !!  47         tb->tlb_nr = 0;
 42 void v6wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);  << 
 43 void v6wbi_flush_kern_tlb_range(unsigned long, unsigned long);                           << 
 44                                                                                          << 
 45 struct cpu_tlb_fns v6wbi_tlb_fns __initconst = {                                         << 
 46         .flush_user_range       = v6wbi_flush_user_tlb_range,                            << 
 47         .flush_kern_range       = v6wbi_flush_kern_tlb_range,                            << 
 48         .tlb_flags              = v6wbi_tlb_flags,                                       << 
 49 };                                                                                       << 
 50 #endif                                                                                   << 
 51                                                                                              48 
 52 #ifdef CONFIG_CPU_TLB_V7                                                                 !!  49 out:
 53 void v7wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);  !!  50         put_cpu_var(tlb_batch);
 54 void v7wbi_flush_kern_tlb_range(unsigned long, unsigned long);                           !!  51 }
 55                                                                                          !!  52 
 56 struct cpu_tlb_fns v7wbi_tlb_fns __initconst = {                                         !!  53 void arch_enter_lazy_mmu_mode(void)
 57         .flush_user_range       = v7wbi_flush_user_tlb_range,                            !!  54 {
 58         .flush_kern_range       = v7wbi_flush_kern_tlb_range,                            !!  55         struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
 59         .tlb_flags              = IS_ENABLED(CONFIG_SMP) ? v7wbi_tlb_flags_smp           !!  56 
 60                                                          : v7wbi_tlb_flags_up,           !!  57         tb->active = 1;
 61 };                                                                                       !!  58 }
 62                                                                                          !!  59 
 63 #ifdef CONFIG_SMP_ON_UP                                                                  !!  60 void arch_leave_lazy_mmu_mode(void)
 64 /* This will be run-time patched so the offset better be right */                        !!  61 {
 65 static_assert(offsetof(struct cpu_tlb_fns, tlb_flags) == 8);                             !!  62         struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
 66                                                                                          !!  63 
 67 asm("   .pushsection    \".alt.smp.init\", \"a\"                \n" \                    !!  64         if (tb->tlb_nr)
 68     "   .align          2                                      \n" \                    !!  65                 flush_tlb_pending();
 69     "   .long           v7wbi_tlb_fns + 8 - .                  \n" \                    !!  66         tb->active = 0;
 70     "   .long "         __stringify(v7wbi_tlb_flags_up) "      \n" \                    !!  67 }
 71     "   .popsection                                            \n");                    !!  68 
 72 #endif                                                                                   !!  69 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 73 #endif                                                                                   !!  70                               bool exec, unsigned int hugepage_shift)
 74                                                                                          !!  71 {
 75 #ifdef CONFIG_CPU_TLB_FA                                                                 !!  72         struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 76 void fa_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);     !!  73         unsigned long nr;
 77 void fa_flush_kern_tlb_range(unsigned long, unsigned long);                              !!  74 
 78                                                                                          !!  75         vaddr &= PAGE_MASK;
 79 struct cpu_tlb_fns fa_tlb_fns __initconst = {                                            !!  76         if (exec)
 80         .flush_user_range       = fa_flush_user_tlb_range,                               !!  77                 vaddr |= 0x1UL;
 81         .flush_kern_range       = fa_flush_kern_tlb_range,                               !!  78 
 82         .tlb_flags              = fa_tlb_flags,                                          !!  79         nr = tb->tlb_nr;
 83 };                                                                                       !!  80 
 84 #endif                                                                                   !!  81         if (unlikely(nr != 0 && mm != tb->mm)) {
                                                   >>  82                 flush_tlb_pending();
                                                   >>  83                 nr = 0;
                                                   >>  84         }
                                                   >>  85 
                                                   >>  86         if (!tb->active) {
                                                   >>  87                 flush_tsb_user_page(mm, vaddr, hugepage_shift);
                                                   >>  88                 global_flush_tlb_page(mm, vaddr);
                                                   >>  89                 goto out;
                                                   >>  90         }
                                                   >>  91 
                                                   >>  92         if (nr == 0) {
                                                   >>  93                 tb->mm = mm;
                                                   >>  94                 tb->hugepage_shift = hugepage_shift;
                                                   >>  95         }
                                                   >>  96 
                                                   >>  97         if (tb->hugepage_shift != hugepage_shift) {
                                                   >>  98                 flush_tlb_pending();
                                                   >>  99                 tb->hugepage_shift = hugepage_shift;
                                                   >> 100                 nr = 0;
                                                   >> 101         }
                                                   >> 102 
                                                   >> 103         tb->vaddrs[nr] = vaddr;
                                                   >> 104         tb->tlb_nr = ++nr;
                                                   >> 105         if (nr >= TLB_BATCH_NR)
                                                   >> 106                 flush_tlb_pending();
                                                   >> 107 
                                                   >> 108 out:
                                                   >> 109         put_cpu_var(tlb_batch);
                                                   >> 110 }
                                                   >> 111 
                                                   >> 112 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                                                   >> 113                    pte_t *ptep, pte_t orig, int fullmm,
                                                   >> 114                    unsigned int hugepage_shift)
                                                   >> 115 {
                                                   >> 116         if (tlb_type != hypervisor &&
                                                   >> 117             pte_dirty(orig)) {
                                                   >> 118                 unsigned long paddr, pfn = pte_pfn(orig);
                                                   >> 119                 struct address_space *mapping;
                                                   >> 120                 struct page *page;
                                                   >> 121                 struct folio *folio;
                                                   >> 122 
                                                   >> 123                 if (!pfn_valid(pfn))
                                                   >> 124                         goto no_cache_flush;
                                                   >> 125 
                                                   >> 126                 page = pfn_to_page(pfn);
                                                   >> 127                 if (PageReserved(page))
                                                   >> 128                         goto no_cache_flush;
                                                   >> 129 
                                                   >> 130                 /* A real file page? */
                                                   >> 131                 folio = page_folio(page);
                                                   >> 132                 mapping = folio_flush_mapping(folio);
                                                   >> 133                 if (!mapping)
                                                   >> 134                         goto no_cache_flush;
                                                   >> 135 
                                                   >> 136                 paddr = (unsigned long) page_address(page);
                                                   >> 137                 if ((paddr ^ vaddr) & (1 << 13))
                                                   >> 138                         flush_dcache_folio_all(mm, folio);
                                                   >> 139         }
                                                   >> 140 
                                                   >> 141 no_cache_flush:
                                                   >> 142         if (!fullmm)
                                                   >> 143                 tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
                                                   >> 144 }
                                                   >> 145 
                                                   >> 146 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
                                                   >> 147 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                                                   >> 148                                pmd_t pmd)
                                                   >> 149 {
                                                   >> 150         unsigned long end;
                                                   >> 151         pte_t *pte;
                                                   >> 152 
                                                   >> 153         pte = pte_offset_map(&pmd, vaddr);
                                                   >> 154         if (!pte)
                                                   >> 155                 return;
                                                   >> 156         end = vaddr + HPAGE_SIZE;
                                                   >> 157         while (vaddr < end) {
                                                   >> 158                 if (pte_val(*pte) & _PAGE_VALID) {
                                                   >> 159                         bool exec = pte_exec(*pte);
                                                   >> 160 
                                                   >> 161                         tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
                                                   >> 162                 }
                                                   >> 163                 pte++;
                                                   >> 164                 vaddr += PAGE_SIZE;
                                                   >> 165         }
                                                   >> 166         pte_unmap(pte);
                                                   >> 167 }
                                                   >> 168 
                                                   >> 169 
                                                   >> 170 static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
                                                   >> 171                            pmd_t orig, pmd_t pmd)
                                                   >> 172 {
                                                   >> 173         if (mm == &init_mm)
                                                   >> 174                 return;
                                                   >> 175 
                                                   >> 176         if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
                                                   >> 177                 /*
                                                   >> 178                  * Note that this routine only sets pmds for THP pages.
                                                   >> 179                  * Hugetlb pages are handled elsewhere.  We need to check
                                                   >> 180                  * for huge zero page.  Huge zero pages are like hugetlb
                                                   >> 181                  * pages in that there is no RSS, but there is the need
                                                   >> 182                  * for TSB entries.  So, huge zero page counts go into
                                                   >> 183                  * hugetlb_pte_count.
                                                   >> 184                  */
                                                   >> 185                 if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
                                                   >> 186                         if (is_huge_zero_pmd(pmd))
                                                   >> 187                                 mm->context.hugetlb_pte_count++;
                                                   >> 188                         else
                                                   >> 189                                 mm->context.thp_pte_count++;
                                                   >> 190                 } else {
                                                   >> 191                         if (is_huge_zero_pmd(orig))
                                                   >> 192                                 mm->context.hugetlb_pte_count--;
                                                   >> 193                         else
                                                   >> 194                                 mm->context.thp_pte_count--;
                                                   >> 195                 }
                                                   >> 196 
                                                   >> 197                 /* Do not try to allocate the TSB hash table if we
                                                   >> 198                  * don't have one already.  We have various locks held
                                                   >> 199                  * and thus we'll end up doing a GFP_KERNEL allocation
                                                   >> 200                  * in an atomic context.
                                                   >> 201                  *
                                                   >> 202                  * Instead, we let the first TLB miss on a hugepage
                                                   >> 203                  * take care of this.
                                                   >> 204                  */
                                                   >> 205         }
                                                   >> 206 
                                                   >> 207         if (!pmd_none(orig)) {
                                                   >> 208                 addr &= HPAGE_MASK;
                                                   >> 209                 if (pmd_trans_huge(orig)) {
                                                   >> 210                         pte_t orig_pte = __pte(pmd_val(orig));
                                                   >> 211                         bool exec = pte_exec(orig_pte);
                                                   >> 212 
                                                   >> 213                         tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
                                                   >> 214                         tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
                                                   >> 215                                           REAL_HPAGE_SHIFT);
                                                   >> 216                 } else {
                                                   >> 217                         tlb_batch_pmd_scan(mm, addr, orig);
                                                   >> 218                 }
                                                   >> 219         }
                                                   >> 220 }
                                                   >> 221 
                                                   >> 222 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                                   >> 223                 pmd_t *pmdp, pmd_t pmd)
                                                   >> 224 {
                                                   >> 225         pmd_t orig = *pmdp;
                                                   >> 226 
                                                   >> 227         *pmdp = pmd;
                                                   >> 228         __set_pmd_acct(mm, addr, orig, pmd);
                                                   >> 229 }
                                                   >> 230 
                                                   >> 231 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                                                   >> 232                 unsigned long address, pmd_t *pmdp, pmd_t pmd)
                                                   >> 233 {
                                                   >> 234         pmd_t old;
                                                   >> 235 
                                                   >> 236         do {
                                                   >> 237                 old = *pmdp;
                                                   >> 238         } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
                                                   >> 239         __set_pmd_acct(vma->vm_mm, address, old, pmd);
                                                   >> 240 
                                                   >> 241         return old;
                                                   >> 242 }
                                                   >> 243 
                                                   >> 244 /*
                                                   >> 245  * This routine is only called when splitting a THP
                                                   >> 246  */
                                                   >> 247 pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                                                   >> 248                      pmd_t *pmdp)
                                                   >> 249 {
                                                   >> 250         pmd_t old, entry;
                                                   >> 251 
                                                   >> 252         VM_WARN_ON_ONCE(!pmd_present(*pmdp));
                                                   >> 253         entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
                                                   >> 254         old = pmdp_establish(vma, address, pmdp, entry);
                                                   >> 255         flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
                                                   >> 256 
                                                   >> 257         /*
                                                   >> 258          * set_pmd_at() will not be called in a way to decrement
                                                   >> 259          * thp_pte_count when splitting a THP, so do it now.
                                                   >> 260          * Sanity check pmd before doing the actual decrement.
                                                   >> 261          */
                                                   >> 262         if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
                                                   >> 263             !is_huge_zero_pmd(entry))
                                                   >> 264                 (vma->vm_mm)->context.thp_pte_count--;
                                                   >> 265 
                                                   >> 266         return old;
                                                   >> 267 }
                                                   >> 268 
                                                   >> 269 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                                   >> 270                                 pgtable_t pgtable)
                                                   >> 271 {
                                                   >> 272         struct list_head *lh = (struct list_head *) pgtable;
                                                   >> 273 
                                                   >> 274         assert_spin_locked(&mm->page_table_lock);
                                                   >> 275 
                                                   >> 276         /* FIFO */
                                                   >> 277         if (!pmd_huge_pte(mm, pmdp))
                                                   >> 278                 INIT_LIST_HEAD(lh);
                                                   >> 279         else
                                                   >> 280                 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
                                                   >> 281         pmd_huge_pte(mm, pmdp) = pgtable;
                                                   >> 282 }
                                                   >> 283 
                                                   >> 284 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
                                                   >> 285 {
                                                   >> 286         struct list_head *lh;
                                                   >> 287         pgtable_t pgtable;
                                                   >> 288 
                                                   >> 289         assert_spin_locked(&mm->page_table_lock);
                                                   >> 290 
                                                   >> 291         /* FIFO */
                                                   >> 292         pgtable = pmd_huge_pte(mm, pmdp);
                                                   >> 293         lh = (struct list_head *) pgtable;
                                                   >> 294         if (list_empty(lh))
                                                   >> 295                 pmd_huge_pte(mm, pmdp) = NULL;
                                                   >> 296         else {
                                                   >> 297                 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                                                   >> 298                 list_del(lh);
                                                   >> 299         }
                                                   >> 300         pte_val(pgtable[0]) = 0;
                                                   >> 301         pte_val(pgtable[1]) = 0;
                                                   >> 302 
                                                   >> 303         return pgtable;
                                                   >> 304 }
                                                   >> 305 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 85                                                   306 
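
The sparc side above implements lazy TLB flush batching: between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode(), page-table tear-downs only record virtual addresses in a per-CPU tlb_batch, and the real flush is deferred until the batch fills (TLB_BATCH_NR entries), the batch's mm or hugepage shift changes, or lazy mode ends. Below is a minimal userspace sketch of that state machine, not kernel code: "struct mm", the TLB_BATCH_NR value, and the printf() calls standing in for flush_tsb_user()/global_flush_tlb_page()/smp_flush_tlb_pending() are illustrative stand-ins only.

/*
 * Userspace model of the per-CPU TLB flush batching shown above.
 * Compile with: cc -o batch batch.c
 */
#include <stdbool.h>
#include <stdio.h>

#define TLB_BATCH_NR	8		/* assumed size; the real value is arch-defined */
#define PAGE_SHIFT	13		/* sparc64 uses 8 KiB base pages */
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

struct mm { int id; };			/* stand-in for struct mm_struct */

static struct {				/* the kernel keeps one of these per CPU */
	struct mm *mm;
	unsigned long vaddrs[TLB_BATCH_NR];
	unsigned long nr;
	unsigned int hugepage_shift;
	bool active;			/* inside lazy MMU mode? */
} batch;

static void flush_tlb_pending(void)
{
	if (!batch.nr)
		return;
	/* models the TSB flush plus single-page or cross-call TLB flush */
	printf("flush %lu page(s) for mm %d\n", batch.nr, batch.mm->id);
	batch.nr = 0;
}

static void tlb_batch_add_one(struct mm *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;		/* low bit marks executable mappings */

	/* A batch covers exactly one mm: switching drains the old batch. */
	if (batch.nr != 0 && mm != batch.mm)
		flush_tlb_pending();

	/* Outside lazy MMU mode every page is flushed immediately. */
	if (!batch.active) {
		printf("immediate flush of %#lx for mm %d\n", vaddr, mm->id);
		return;
	}

	if (batch.nr == 0) {
		batch.mm = mm;
		batch.hugepage_shift = hugepage_shift;
	}

	/* A batch also covers one page size; drain before mixing sizes. */
	if (batch.hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		batch.hugepage_shift = hugepage_shift;
	}

	batch.vaddrs[batch.nr++] = vaddr;
	if (batch.nr >= TLB_BATCH_NR)	/* batch full: drain eagerly */
		flush_tlb_pending();
}

int main(void)
{
	struct mm a = { 1 }, b = { 2 };

	batch.active = true;		/* arch_enter_lazy_mmu_mode() */
	for (unsigned long v = 0; v < 4; v++)
		tlb_batch_add_one(&a, v << PAGE_SHIFT, false, PAGE_SHIFT);
	tlb_batch_add_one(&b, 16UL << PAGE_SHIFT, true, PAGE_SHIFT);
	if (batch.nr)			/* arch_leave_lazy_mmu_mode() */
		flush_tlb_pending();
	batch.active = false;
	return 0;
}

The ordering mirrors tlb_batch_add_one() above: the cross-mm drain happens before the active check, so an immediate flush never inherits another mm's entries, and the hugepage-shift drain keeps each batch homogeneous in page size, which lets flush_tsb_user() apply a single shift to the whole batch.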

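pmdp_establish() above swaps in a new PMD with cmpxchg64() in a retry loop, so the displaced entry is captured atomically even if a concurrent updater races with the store. The same read/compare-exchange pattern, sketched with C11 atomics in userspace (pmd_val_t and pmd_establish() are hypothetical names for this illustration):

#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

typedef uint64_t pmd_val_t;

/*
 * Install @new in *pmdp and return the entry it displaced.  If another
 * thread updates the PMD between the load and the compare-exchange,
 * the CAS fails, 'old' is refreshed, and the loop retries.
 */
static pmd_val_t pmd_establish(_Atomic pmd_val_t *pmdp, pmd_val_t new)
{
	pmd_val_t old = atomic_load(pmdp);

	while (!atomic_compare_exchange_weak(pmdp, &old, new))
		;	/* C11 refreshes 'old' on failure automatically */
	return old;
}

int main(void)
{
	/* An arbitrary "present" entry; clearing it models pmdp_invalidate()
	 * stripping _PAGE_VALID before the TLB range flush. */
	_Atomic pmd_val_t pmd = 0x8000000000000e61ULL;
	pmd_val_t old = pmd_establish(&pmd, 0);

	printf("displaced entry: %#" PRIx64 "\n", old);
	return 0;
}

The kernel spells the reload out explicitly (do { old = *pmdp; } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);) because cmpxchg64() only returns the value it found; C11's weak compare-exchange writes that value back into 'old' for you.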
