Linux/arch/powerpc/mm/book3s32/hash_low.S


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 */

#include <linux/export.h>
#include <linux/pgtable.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

#ifdef CONFIG_PTE_64BIT
#define PTE_T_SIZE              8
#define PTE_FLAGS_OFFSET        4       /* offset of PTE flags, in bytes */
#else
#define PTE_T_SIZE              4
#define PTE_FLAGS_OFFSET        0
#endif

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains required access flags:
 *   - For ISI: _PAGE_PRESENT | _PAGE_EXEC
 *   - For DSI: _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r6, r8, r10, ctr, lr.
 */
        .text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
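        /*
         * Take mmu_hash_lock.  The lock address is physical
         * (PAGE_OFFSET subtracted) because this can run with data
         * translation off.  Spin with plain loads while the lock is
         * held, then race for it with lwarx/stwcx.; r0 = 0x0fff0000
         * is simply a non-zero "locked" token.
         */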
        lis     r8, (mmu_hash_lock - PAGE_OFFSET)@h
        ori     r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
        lis     r0,0x0fff
        b       10f
11:     lwz     r6,0(r8)
        cmpwi   0,r6,0
        bne     11b
10:     lwarx   r6,0,r8
        cmpwi   0,r6,0
        bne-    11b
        stwcx.  r0,0,r8
        bne-    10b
        isync
#endif
        /* Get PTE (linux-style) and check access */
        lis     r0, TASK_SIZE@h         /* check if kernel address */
        cmplw   0,r4,r0
        mfspr   r8,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
        lwz     r5,PGDIR(r8)            /* virt page-table root */
        blt+    112f                    /* assume user more likely */
        lis     r5,swapper_pg_dir@ha    /* if kernel address, use */
        andi.   r0,r9,MSR_PR            /* Check usermode */
        addi    r5,r5,swapper_pg_dir@l  /* kernel page table */
#ifdef CONFIG_SMP
        bne-    .Lhash_page_out         /* return if usermode */
#else
        bnelr-
#endif
112:    tophys(r5, r5)
#ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
        lwz     r8,0(r5)                /* get pmd entry */
        rlwinm. r8,r8,0,0,19            /* extract address of pte page */
#else
        rlwinm  r8,r4,13,19,29          /* Compute pgdir/pmd offset */
        lwzx    r8,r8,r5                /* Get L1 entry */
        rlwinm. r8,r8,0,0,20            /* extract pt base address */
#endif
#ifdef CONFIG_SMP
        beq-    .Lhash_page_out         /* return if no mapping */
#else
        /* XXX it seems like the 601 will give a machine fault on the
           rfi if its alignment is wrong (bottom 4 bits of address are
           8 or 0xc) and we have had a not-taken conditional branch
           to the address following the rfi. */
        beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
        rlwimi  r8,r4,22,20,29          /* insert next 10 bits of address */
#else
        rlwimi  r8,r4,23,20,28          /* compute pte address */
        /*
         * If PTE_64BIT is set, the low word is the flags word; use that
         * word for locking since it contains all the interesting bits.
         */
        addi    r8,r8,PTE_FLAGS_OFFSET
#endif

        /*
         * Update the linux PTE atomically.  We do the lwarx up-front
         * because almost always, there won't be a permission violation
         * and there won't already be an HPTE, and thus we will have
         * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
         */
.Lretry:
        lwarx   r6,0,r8                 /* get linux-style pte, flag word */
#ifdef CONFIG_PPC_KUAP
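        /*
         * KUAP: the segment register's Ks bit marks segments where
         * supervisor writes are blocked.  Mask _PAGE_WRITE out of the
         * effective permissions when Ks = 1 and the fault came from
         * kernel mode (MSR_PR = 0), as the steps below do.
         */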
        mfsrin  r5,r4
        rlwinm  r0,r9,28,_PAGE_WRITE    /* MSR[PR] => _PAGE_WRITE */
        rlwinm  r5,r5,12,_PAGE_WRITE    /* Ks => _PAGE_WRITE */
        andc    r5,r5,r0                /* Ks & ~MSR[PR] */
        andc    r5,r6,r5                /* Clear _PAGE_WRITE when Ks = 1 && MSR[PR] = 0 */
        andc.   r5,r3,r5                /* check access & ~permission */
#else
        andc.   r5,r3,r6                /* check access & ~permission */
#endif
        rlwinm  r0,r3,32-3,24,24        /* _PAGE_WRITE access -> _PAGE_DIRTY */
        ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
#ifdef CONFIG_SMP
        bne-    .Lhash_page_out         /* return if access not permitted */
#else
        bnelr-
#endif
        or      r5,r0,r6                /* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
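        /*
         * The subtract-and-add below makes the address of the upper
         * PTE word appear to depend on the lwarx result, so the CPU
         * orders this load after the reservation load without an
         * explicit barrier.
         */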
        subf    r10,r6,r8               /* create false data dependency */
        subi    r10,r10,PTE_FLAGS_OFFSET
        lwzx    r10,r6,r10              /* Get upper PTE word */
#else
        lwz     r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
        stwcx.  r5,0,r8                 /* attempt to update PTE */
        bne-    .Lretry                 /* retry if someone got there first */

        mfsrin  r3,r4                   /* get segment reg for segment */
        bl      create_hpte             /* add the hash table entry */

#ifdef CONFIG_SMP
        eieio
        lis     r8, (mmu_hash_lock - PAGE_OFFSET)@ha
        li      r0,0
        stw     r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif
        b       fast_hash_page_return

#ifdef CONFIG_SMP
.Lhash_page_out:
        eieio
        lis     r8, (mmu_hash_lock - PAGE_OFFSET)@ha
        li      r0,0
        stw     r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
        blr
#endif /* CONFIG_SMP */
_ASM_NOKPROBE_SYMBOL(hash_page)

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
        mflr    r0
        stw     r0,4(r1)

#ifdef CONFIG_SMP
        lwz     r8,TASK_CPU(r2)         /* to go in mmu_hash_lock */
        oris    r8,r8,12
#endif /* CONFIG_SMP */
        /*
         * We disable interrupts here, even on UP, because we don't
         * want to race with hash_page, and because we want the
         * _PAGE_HASHPTE bit to be a reliable indication of whether
         * the HPTE exists (or at least whether one did once).
         * We also turn off the MMU for data accesses so that we
         * can't take a hash table miss (assuming the code is
         * covered by a BAT).  -- paulus
         */
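        /*
         * MSR bits are numbered in IBM big-endian order: bit 16 is
         * MSR_EE (0x8000) and bit 27 is MSR_DR (0x10), which is what
         * the two rlwinm mask ranges below clear.
         */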
        mfmsr   r9
        rlwinm  r0,r9,0,17,15           /* clear bit 16 (MSR_EE) */
        rlwinm  r0,r0,0,28,26           /* clear MSR_DR */
        mtmsr   r0
        isync

#ifdef CONFIG_SMP
        lis     r6, (mmu_hash_lock - PAGE_OFFSET)@ha
        addi    r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10:     lwarx   r0,0,r6                 /* take the mmu_hash_lock */
        cmpwi   0,r0,0
        bne-    11f
        stwcx.  r8,0,r6
        beq+    12f
11:     lwz     r0,0(r6)
        cmpwi   0,r0,0
        beq     10b
        b       11b
12:     isync
#endif

        /*
         * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
         * If _PAGE_HASHPTE was already set, we don't replace the existing
         * HPTE, so we just unlock and return.
         */
        mr      r8,r5
#ifndef CONFIG_PTE_64BIT
        rlwimi  r8,r4,22,20,29
#else
        rlwimi  r8,r4,23,20,28
        addi    r8,r8,PTE_FLAGS_OFFSET
#endif
1:      lwarx   r6,0,r8
        andi.   r0,r6,_PAGE_HASHPTE
        bne     9f                      /* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
        subf    r10,r6,r8               /* create false data dependency */
        subi    r10,r10,PTE_FLAGS_OFFSET
        lwzx    r10,r6,r10              /* Get upper PTE word */
#else
        lwz     r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
        ori     r5,r6,_PAGE_HASHPTE
        stwcx.  r5,0,r8
        bne-    1b

        /* Convert context and va to VSID */
        mulli   r3,r3,897*16            /* multiply context by context skew */
        rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
        mulli   r0,r0,0x111             /* multiply by ESID skew */
        add     r3,r3,r0                /* note create_hpte trims to 24 bits */
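        /*
         * For illustration: VSID = context * 14352 (897 * 16 = 0x3810)
         * + ESID * 0x111, where ESID is the top 4 bits of the va;
         * create_hpte keeps only the low 24 bits of the result.
         */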

        bl      create_hpte

9:
#ifdef CONFIG_SMP
        lis     r6, (mmu_hash_lock - PAGE_OFFSET)@ha
        addi    r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
        eieio
        li      r0,0
        stw     r0,0(r6)                /* clear mmu_hash_lock */
#endif

        /* reenable interrupts and DR */
        mtmsr   r9
        isync

        lwz     r0,4(r1)
        mtlr    r0
        blr
_ASM_NOKPROBE_SYMBOL(add_hash_page)

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE). r10 contains the
 * upper half of the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are for the early hash table.
 */
Hash_base = early_hash
Hash_bits = 12                          /* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
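/*
 * With Hash_bits = 12 the table has 2^12 = 4096 PTEGs of 64 bytes
 * each, i.e. 256kB, and Hash_msk = 0x3ffc0 masks the byte offset
 * of a PTEG within the table.
 */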

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE       8
#define PTEG_SIZE       64
#define LG_PTEG_SIZE    6
#define LDPTEu          lwzu
#define LDPTE           lwz
#define STPTE           stw
#define CMPPTE          cmpw
#define PTE_H           0x40
#define PTE_V           0x80000000
#define TST_V(r)        rlwinm. r,r,0,0,0
#define SET_V(r)        oris r,r,PTE_V@h
#define CLR_V(r,t)      rlwinm r,r,0,1,31

#define HASH_LEFT       31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT      31-LG_PTEG_SIZE
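/*
 * For the early table (Hash_bits = 12) this gives HASH_LEFT = 14 and
 * HASH_RIGHT = 25: the hash occupies bits 14-25 (IBM numbering) of
 * the PTEG address, i.e. bits 6-17 counting from the LSB, leaving
 * the low 6 bits for the offset within the 64-byte PTEG.
 */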

__REF
_GLOBAL(create_hpte)
        /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
        lis     r0, TASK_SIZE@h
        rlwinm  r5,r5,0,~3              /* Clear PP bits */
        cmplw   r4,r0
        rlwinm  r8,r5,32-9,30,30        /* _PAGE_WRITE -> PP msb */
        rlwinm  r0,r5,32-6,30,30        /* _PAGE_DIRTY -> PP msb */
        and     r8,r8,r0                /* writable if _RW & _DIRTY */
        bge-    1f                      /* Kernelspace ? Skip */
        ori     r5,r5,3                 /* Userspace ? PP = 3 */
1:      ori     r8,r8,0xe04             /* clear out reserved bits */
        andc    r8,r5,r8                /* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
        rlwinm  r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
        /* Put the XPN bits into the PTE */
        rlwimi  r8,r10,8,20,22
        rlwimi  r8,r10,2,29,29
#endif

        /* Construct the high word of the PPC-style PTE (r5) */
        rlwinm  r5,r3,7,1,24            /* put VSID in 0x7fffff80 bits */
        rlwimi  r5,r4,10,26,31          /* put in API (abbrev page index) */
        SET_V(r5)                       /* set V (valid) bit */

        patch_site      0f, patch__hash_page_A0
        patch_site      1f, patch__hash_page_A1
        patch_site      2f, patch__hash_page_A2
        /* Get the address of the primary PTE group in the hash table (r3) */
0:      lis     r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
1:      rlwimi  r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:      rlwinm  r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
        xor     r3,r3,r0                /* make primary hash */
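        /*
         * In effect r3 = Hash_base | (((VSID ^ PI) * 64) & Hash_msk):
         * the low bits of the VSID are XORed with the page index to
         * select a PTEG, as in the standard 32-bit hashed page table
         * scheme.
         */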
        li      r0,8                    /* PTEs/group */

        /*
         * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
         * if it is clear, meaning that the HPTE isn't there already...
         */
        andi.   r6,r6,_PAGE_HASHPTE
        beq+    10f                     /* no PTE: go look for an empty slot */
        tlbie   r4

        /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
        mtctr   r0
        addi    r4,r3,-HPTE_SIZE
1:      LDPTEu  r6,HPTE_SIZE(r4)        /* get next PTE */
        CMPPTE  0,r6,r5
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
        beq+    .Lfound_slot

        patch_site      0f, patch__hash_page_B
        /* Search the secondary PTEG for a matching PTE */
        ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
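        /*
         * The XORs below turn the primary PTEG address into the
         * secondary one: the hash is complemented within the table
         * (secondary = primary ^ Hash_msk).  Like the lines patched
         * above, this is rewritten once the real table size is known.
         */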
0:      xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
        xori    r4,r4,(-PTEG_SIZE & 0xffff)
        addi    r4,r4,-HPTE_SIZE
        mtctr   r0
2:      LDPTEu  r6,HPTE_SIZE(r4)
        CMPPTE  0,r6,r5
        bdnzf   2,2b
        beq+    .Lfound_slot
        xori    r5,r5,PTE_H             /* clear H bit again */

        /* Search the primary PTEG for an empty slot */
10:     mtctr   r0
        addi    r4,r3,-HPTE_SIZE        /* search primary PTEG */
1:      LDPTEu  r6,HPTE_SIZE(r4)        /* get next PTE */
        TST_V(r6)                       /* test valid bit */
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
        beq+    .Lfound_empty

        patch_site      0f, patch__hash_page_C
        /* Search the secondary PTEG for an empty slot */
        ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
0:      xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
        xori    r4,r4,(-PTEG_SIZE & 0xffff)
        addi    r4,r4,-HPTE_SIZE
        mtctr   r0
2:      LDPTEu  r6,HPTE_SIZE(r4)
        TST_V(r6)
        bdnzf   2,2b
        beq+    .Lfound_empty
        xori    r5,r5,PTE_H             /* clear H bit again */

        /*
         * Choose an arbitrary slot in the primary PTEG to overwrite.
         * Since both the primary and secondary PTEGs are full, and we
         * have no information that the PTEs in the primary PTEG are
         * more important or useful than those in the secondary PTEG,
         * and we know there is a definite (although small) speed
         * advantage to putting the PTE in the primary PTEG, we always
         * put the PTE in the primary PTEG.
         */

        lis     r4, (next_slot - PAGE_OFFSET)@ha        /* get next evict slot */
        lwz     r6, (next_slot - PAGE_OFFSET)@l(r4)
        addi    r6,r6,HPTE_SIZE                 /* search for candidate */
        andi.   r6,r6,7*HPTE_SIZE
        stw     r6,next_slot@l(r4)
        add     r4,r3,r6
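        /*
         * next_slot advances by HPTE_SIZE and wraps modulo 8 slots
         * (andi. with 7*HPTE_SIZE), so successive evictions cycle
         * round-robin through the 8 entries of the primary PTEG.
         */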

#ifndef CONFIG_SMP
        /* Store PTE in PTEG */
.Lfound_empty:
        STPTE   r5,0(r4)
.Lfound_slot:
        STPTE   r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
.Lfound_empty:
.Lfound_slot:
        CLR_V(r5,r0)            /* clear V (valid) bit in PTE */
        STPTE   r5,0(r4)
        sync
        TLBSYNC
        STPTE   r8,HPTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
        sync
        SET_V(r5)
        STPTE   r5,0(r4)        /* finally set V bit in PTE */
#endif /* CONFIG_SMP */

        sync            /* make sure pte updates get to memory */
        blr
        .previous
_ASM_NOKPROBE_SYMBOL(create_hpte)

        .section .bss
        .align  2
next_slot:
        .space  4
        .previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *                  int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
__REF
_GLOBAL(flush_hash_pages)
        /*
         * We disable interrupts here, even on UP, because we want
         * the _PAGE_HASHPTE bit to be a reliable indication of
         * whether the HPTE exists (or at least whether one did once).
         * We also turn off the MMU for data accesses so that we
         * can't take a hash table miss (assuming the code is
         * covered by a BAT).  -- paulus
         */
        mfmsr   r10
        rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
        rlwinm  r0,r0,0,28,26           /* clear MSR_DR */
        mtmsr   r0
        isync

        /* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,22,20,29
#else
        rlwimi  r5,r4,23,20,28
        addi    r5,r5,PTE_FLAGS_OFFSET
#endif
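        /*
         * r6 = count of PTEs to check; cr1 compares it against 1 so we
         * know when the last one has been examined.  Step the va by
         * 4kB and the PTE pointer by PTE_T_SIZE until a PTE with
         * _PAGE_HASHPTE set is found (branch to 2:) or the range is
         * exhausted (branch to 19:).
         */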
1:      lwz     r0,0(r5)
        cmpwi   cr1,r6,1
        andi.   r0,r0,_PAGE_HASHPTE
        bne     2f
        ble     cr1,19f
        addi    r4,r4,0x1000
        addi    r5,r5,PTE_T_SIZE
        addi    r6,r6,-1
        b       1b

        /* Convert context and va to VSID */
2:      mulli   r3,r3,897*16            /* multiply context by context skew */
        rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
        mulli   r0,r0,0x111             /* multiply by ESID skew */
        add     r3,r3,r0                /* note code below trims to 24 bits */

        /* Construct the high word of the PPC-style PTE (r11) */
        rlwinm  r11,r3,7,1,24           /* put VSID in 0x7fffff80 bits */
        rlwimi  r11,r4,10,26,31         /* put in API (abbrev page index) */
        SET_V(r11)                      /* set V (valid) bit */

#ifdef CONFIG_SMP
        lis     r9, (mmu_hash_lock - PAGE_OFFSET)@ha
        addi    r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
        tophys  (r8, r2)
        lwz     r8, TASK_CPU(r8)
        oris    r8,r8,9
10:     lwarx   r0,0,r9
        cmpwi   0,r0,0
        bne-    11f
        stwcx.  r8,0,r9
        beq+    12f
11:     lwz     r0,0(r9)
        cmpwi   0,r0,0
        beq     10b
        b       11b
12:     isync
#endif

        /*
         * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
         * already clear, we're done (for this pte).  If not,
         * clear it (atomically) and proceed.  -- paulus.
         */
33:     lwarx   r8,0,r5                 /* fetch the pte flags word */
        andi.   r0,r8,_PAGE_HASHPTE
        beq     8f                      /* done if HASHPTE is already clear */
        rlwinm  r8,r8,0,31,29           /* clear HASHPTE bit */
        stwcx.  r8,0,r5                 /* update the pte */
        bne-    33b

        patch_site      0f, patch__flush_hash_A0
        patch_site      1f, patch__flush_hash_A1
        patch_site      2f, patch__flush_hash_A2
        /* Get the address of the primary PTE group in the hash table (r8) */
0:      lis     r8, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
1:      rlwimi  r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:      rlwinm  r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
        xor     r8,r0,r8                /* make primary hash */

        /* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
        li      r0,8                    /* PTEs/group */
        mtctr   r0
        addi    r12,r8,-HPTE_SIZE
1:      LDPTEu  r0,HPTE_SIZE(r12)       /* get next PTE */
        CMPPTE  0,r0,r11
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
        beq+    3f

        patch_site      0f, patch__flush_hash_B
        /* Search the secondary PTEG for a matching PTE */
        ori     r11,r11,PTE_H           /* set H (secondary hash) bit */
        li      r0,8                    /* PTEs/group */
0:      xoris   r12,r8,Hash_msk>>16     /* compute secondary hash */
        xori    r12,r12,(-PTEG_SIZE & 0xffff)
        addi    r12,r12,-HPTE_SIZE
        mtctr   r0
2:      LDPTEu  r0,HPTE_SIZE(r12)
        CMPPTE  0,r0,r11
        bdnzf   2,2b
        xori    r11,r11,PTE_H           /* clear H again */
        bne-    4f                      /* should rarely fail to find it */

3:      li      r0,0
        STPTE   r0,0(r12)               /* invalidate entry */
4:      sync
        tlbie   r4                      /* in hw tlb too */
        sync

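        /*
         * Advance to the next PTE in the range: r6 holds the remaining
         * count, r5 the PTE pointer, r4 the va.  Flush again (33:) if
         * the next PTE has _PAGE_HASHPTE set, otherwise keep scanning
         * while any PTEs remain.
         */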
8:      ble     cr1,9f                  /* if all ptes checked */
81:     addi    r6,r6,-1
        addi    r5,r5,PTE_T_SIZE
        addi    r4,r4,0x1000
        lwz     r0,0(r5)                /* check next pte */
        cmpwi   cr1,r6,1
        andi.   r0,r0,_PAGE_HASHPTE
        bne     33b
        bgt     cr1,81b

9:
#ifdef CONFIG_SMP
        TLBSYNC
        li      r0,0
        stw     r0,0(r9)                /* clear mmu_hash_lock */
#endif

19:     mtmsr   r10
        isync
        blr
        .previous
EXPORT_SYMBOL(flush_hash_pages)
_ASM_NOKPROBE_SYMBOL(flush_hash_pages)
