
TOMOYO Linux Cross Reference
Linux/arch/powerpc/mm/book3s64/hash_tlb.c


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
#include <asm/pte-walk.h>

#include <trace/events/thp.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
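/*
 * For reference, struct ppc64_tlb_batch is defined in
 * <asm/book3s/64/tlbflush-hash.h>. Approximately (check the header for
 * the authoritative definition in any given kernel version):
 *
 *   struct ppc64_tlb_batch {
 *           int              active;    // a lazy-MMU section is open
 *           unsigned long    index;     // entries queued so far
 *           struct mm_struct *mm;
 *           real_pte_t       pte[PPC64_TLB_BATCH_NR];
 *           unsigned long    vpn[PPC64_TLB_BATCH_NR];
 *           unsigned int     psize;     // page size of this batch
 *           int              ssize;     // segment size of this batch
 *   };
 */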

/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
        unsigned long vpn;
        struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid;
        unsigned int psize;
        int ssize;
        real_pte_t rpte;
        int i, offset;

        i = batch->index;

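        /*
         * Terminology summary (informal; see the book3s64 hash MMU
         * headers): vsid is the virtual segment id, vpn the virtual page
         * number in the hash MMU's global virtual space, psize a
         * MMU_PAGE_* base page size index, and ssize the segment size
         * (256MB or 1TB).
         */
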
        /*
         * Get page size (maybe move back to caller).
         *
         * NOTE: when using special 64K mappings in a 4K environment, as
         * for SPEs, we obtain the page size from the slice, which must
         * therefore still exist (i.e. the VMA must not have been reused)
         * at the time of this call.
         */
        if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
                psize = get_slice_psize(mm, addr);
                /* Mask the address for the correct page size */
                addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
                if (unlikely(psize == MMU_PAGE_16G))
                        offset = PTRS_PER_PUD;
                else
                        offset = PTRS_PER_PMD;
#else
                BUG();
                psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
        } else {
                psize = pte_pagesize_index(mm, addr, pte);
                /*
                 * Mask the address for the standard page size.  If we
                 * have a 64k page kernel, but the hardware does not
                 * support 64k pages, this might be different from the
                 * hardware page size encoded in the slice table.
                 */
                addr &= PAGE_MASK;
                offset = PTRS_PER_PTE;
        }

        /* Build full vaddr */
        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_user_vsid(&mm->context, addr, ssize);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }
        WARN_ON(vsid == 0);
        vpn = hpt_vpn(addr, vsid, ssize);
        rpte = __real_pte(__pte(pte), ptep, offset);

        /*
         * Check if we have an active batch on this CPU. If not, just
         * flush now and return.
         */
        if (!batch->active) {
                flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
                put_cpu_var(ppc64_tlb_batch);
                return;
        }

        /*
         * This can happen when we are in the middle of a TLB batch and
         * we encounter memory pressure (e.g. copy_page_range() when it
         * tries to allocate a new pte). If we have to reclaim memory and
         * end up scanning and resetting referenced bits then our batch
         * context will change mid stream.
         *
         * We also need to ensure only one page size is present in a
         * given batch.
         */
        if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                       batch->ssize != ssize)) {
                __flush_tlb_pending(batch);
                i = 0;
        }
        if (i == 0) {
                batch->mm = mm;
                batch->psize = psize;
                batch->ssize = ssize;
        }
        batch->pte[i] = rpte;
        batch->vpn[i] = vpn;
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
        put_cpu_var(ppc64_tlb_batch);
}
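
/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): the hash flavour of pte_update() in <asm/book3s/64/hash.h>
 * invokes hpte_need_flush() only when the old PTE was actually present
 * in the hash table, roughly:
 *
 *   old = pte_update(...);                  // atomically clear/set bits
 *   if (old & H_PAGE_HASHPTE)               // was it hashed in?
 *           hpte_need_flush(mm, addr, ptep, old, huge);
 *
 * The exact signatures vary between kernel versions; treat this as a
 * sketch rather than a verbatim quote.
 */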

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preemptible region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        int i, local;

        i = batch->index;
        local = mm_is_thread_local(batch->mm);
        if (i == 1)
                flush_hash_page(batch->vpn[0], batch->pte[0],
                                batch->psize, batch->ssize, local);
        else
                flush_hash_range(i, local);
        batch->index = 0;
}
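
/*
 * Sketch of the batch lifecycle (based on the lazy-MMU hooks in
 * <asm/book3s/64/tlbflush-hash.h>; approximate, not verbatim):
 *
 *   arch_enter_lazy_mmu_mode();    // sets batch->active = 1
 *   ... PTE updates call hpte_need_flush(), queueing up to
 *       PPC64_TLB_BATCH_NR entries ...
 *   arch_leave_lazy_mmu_mode();    // flushes any pending entries via
 *                                  // __flush_tlb_pending(), then
 *                                  // clears batch->active
 *
 * Because the batch is per-CPU data, these sections must not be
 * preemptible, which is why the helpers below disable interrupts.
 */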

void hash__tlb_flush(struct mmu_gather *tlb)
{
        struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

        /*
         * If there's a TLB batch pending, then we must flush it because the
         * pages are going to be freed and we really don't want to have a CPU
         * access a freed page because of a stale TLB entry.
         */
        if (tlbbatch->index)
                __flush_tlb_pending(tlbbatch);

        put_cpu_var(ppc64_tlb_batch);
}
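
/*
 * How this is reached (sketch; the exact code lives in
 * <asm/book3s/64/tlbflush.h> and may differ between versions): the
 * generic mmu_gather code invokes the arch tlb_flush() hook, which
 * dispatches on the active MMU type, roughly:
 *
 *   static inline void tlb_flush(struct mmu_gather *tlb)
 *   {
 *           if (radix_enabled())
 *                   radix__tlb_flush(tlb);
 *           else
 *                   hash__tlb_flush(tlb);
 *   }
 */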

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB), while
 *                            keeping the Linux PTEs intact.
 *
 * @start       : starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping,
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is optimized for small code size
 * rather than speed.
 */
void __flush_hash_table_range(unsigned long start, unsigned long end)
{
        int hugepage_shift;
        unsigned long flags;

        start = ALIGN_DOWN(start, PAGE_SIZE);
        end = ALIGN(end, PAGE_SIZE);

        /*
         * Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
         * to being hashed). This is not the most performance oriented
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        for (; start < end; start += PAGE_SIZE) {
                pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
                unsigned long pte;

                if (ptep == NULL)
                        continue;
                pte = pte_val(*ptep);
                if (!(pte & H_PAGE_HASHPTE))
                        continue;
                hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
        }
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
}
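
/*
 * Hypothetical usage sketch (vstart/vend are illustrative names, not
 * taken from a real caller): an IO hotplug teardown path wanting to drop
 * the hash entries for a bridge window, without touching the Linux page
 * tables, could simply do:
 *
 *   __flush_hash_table_range(vstart, vend);
 *
 * Only kernel (init_mm) mappings are covered, since the loop above walks
 * the kernel page tables via find_init_mm_pte().
 */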

void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;
        pte_t *start_pte;
        unsigned long flags;

        addr = ALIGN_DOWN(addr, PMD_SIZE);
        /*
         * Note: Normally, we should only ever use a batch within a
         * PTE locked section. This violates the rule, but will work
         * since we don't actually modify the PTEs, we just flush the
         * hash while leaving the PTEs intact (including their reference
         * to being hashed). This is not the most performance oriented
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
        arch_enter_lazy_mmu_mode();
        start_pte = pte_offset_map(pmd, addr);
        if (!start_pte)
                goto out;
        for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
                unsigned long pteval = pte_val(*pte);

                if (pteval & H_PAGE_HASHPTE)
                        hpte_need_flush(mm, addr, pte, pteval, 0);
                addr += PAGE_SIZE;
        }
        pte_unmap(start_pte);
out:
        arch_leave_lazy_mmu_mode();
        local_irq_restore(flags);
}
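
/*
 * Like __flush_hash_table_range(), but for one PMD's worth of user PTEs:
 * every entry under @pmd that is present in the hash table is flushed,
 * while the Linux PTEs themselves are left untouched. A hypothetical
 * caller about to repurpose a page table page might use it as
 * (mm/pmd/addr are illustrative):
 *
 *   flush_hash_table_pmd_range(mm, pmd, addr);
 *
 * so that no stale hash entries keep referencing the old translations.
 */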
