TOMOYO Linux Cross Reference
Linux/arch/powerpc/include/asm/book3s/64/mmu-hash.h

  1 /* SPDX-License-Identifier: GPL-2.0-or-later */
  2 #ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
  3 #define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
  4 /*
  5  * PowerPC64 memory management structures
  6  *
  7  * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
  8  *   PPC64 rework.
  9  */
 10 
 11 #include <asm/page.h>
 12 #include <asm/bug.h>
 13 #include <asm/asm-const.h>
 14 
 15 /*
 16  * This is necessary to get the definition of PGTABLE_RANGE which we
 17  * need for various slice-related matters. Note that this isn't the
 18  * complete pgtable.h but only a portion of it.
 19  */
 20 #include <asm/book3s/64/pgtable.h>
 21 #include <asm/book3s/64/slice.h>
 22 #include <asm/task_size_64.h>
 23 #include <asm/cpu_has_feature.h>
 24 
 25 /*
 26  * SLB
 27  */
 28 
 29 #define SLB_NUM_BOLTED          2
 30 #define SLB_CACHE_ENTRIES       8
 31 #define SLB_MIN_SIZE            32
 32 
 33 /* Bits in the SLB ESID word */
 34 #define SLB_ESID_V              ASM_CONST(0x0000000008000000) /* valid */
 35 
 36 /* Bits in the SLB VSID word */
 37 #define SLB_VSID_SHIFT          12
 38 #define SLB_VSID_SHIFT_256M     SLB_VSID_SHIFT
 39 #define SLB_VSID_SHIFT_1T       24
 40 #define SLB_VSID_SSIZE_SHIFT    62
 41 #define SLB_VSID_B              ASM_CONST(0xc000000000000000)
 42 #define SLB_VSID_B_256M         ASM_CONST(0x0000000000000000)
 43 #define SLB_VSID_B_1T           ASM_CONST(0x4000000000000000)
 44 #define SLB_VSID_KS             ASM_CONST(0x0000000000000800)
 45 #define SLB_VSID_KP             ASM_CONST(0x0000000000000400)
 46 #define SLB_VSID_N              ASM_CONST(0x0000000000000200) /* no-execute */
 47 #define SLB_VSID_L              ASM_CONST(0x0000000000000100)
 48 #define SLB_VSID_C              ASM_CONST(0x0000000000000080) /* class */
 49 #define SLB_VSID_LP             ASM_CONST(0x0000000000000030)
 50 #define SLB_VSID_LP_00          ASM_CONST(0x0000000000000000)
 51 #define SLB_VSID_LP_01          ASM_CONST(0x0000000000000010)
 52 #define SLB_VSID_LP_10          ASM_CONST(0x0000000000000020)
 53 #define SLB_VSID_LP_11          ASM_CONST(0x0000000000000030)
 54 #define SLB_VSID_LLP            (SLB_VSID_L|SLB_VSID_LP)
 55 
 56 #define SLB_VSID_KERNEL         (SLB_VSID_KP)
 57 #define SLB_VSID_USER           (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
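    /*
     * A sketch of how these bits combine (an assumption, mirroring
     * __mk_vsid_data() at the bottom of this file): the VSID dword of a
     * kernel SLB entry is
     *
     *      vsid_data = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_KERNEL |
     *              llp | ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
     *
     * where llp is the L||LP page-size encoding from mmu_psize_defs[].sllp.
     */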
 58 
 59 #define SLBIE_C                 (0x08000000)
 60 #define SLBIE_SSIZE_SHIFT       25
 61 
 62 /*
 63  * Hash table
 64  */
 65 
 66 #define HPTES_PER_GROUP 8
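    /*
     * Each hash table entry group (PTEG) therefore holds 8 HPTEs of 16
     * bytes each (one struct hash_pte, defined below, per entry). A hash
     * value selects a primary group and its complement selects the
     * secondary group; entries installed there carry HPTE_V_SECONDARY.
     */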
 67 
 68 #define HPTE_V_SSIZE_SHIFT      62
 69 #define HPTE_V_AVPN_SHIFT       7
 70 #define HPTE_V_COMMON_BITS      ASM_CONST(0x000fffffffffffff)
 71 #define HPTE_V_AVPN             ASM_CONST(0x3fffffffffffff80)
 72 #define HPTE_V_AVPN_3_0         ASM_CONST(0x000fffffffffff80)
 73 #define HPTE_V_AVPN_VAL(x)      (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
 74 #define HPTE_V_COMPARE(x,y)     (!(((x) ^ (y)) & 0xffffffffffffff80UL))
 75 #define HPTE_V_BOLTED           ASM_CONST(0x0000000000000010)
 76 #define HPTE_V_LOCK             ASM_CONST(0x0000000000000008)
 77 #define HPTE_V_LARGE            ASM_CONST(0x0000000000000004)
 78 #define HPTE_V_SECONDARY        ASM_CONST(0x0000000000000002)
 79 #define HPTE_V_VALID            ASM_CONST(0x0000000000000001)
 80 
 81 /*
 82  * ISA 3.0 has a different HPTE format.
 83  */
 84 #define HPTE_R_3_0_SSIZE_SHIFT  58
 85 #define HPTE_R_3_0_SSIZE_MASK   (3ull << HPTE_R_3_0_SSIZE_SHIFT)
 86 #define HPTE_R_PP0              ASM_CONST(0x8000000000000000)
 87 #define HPTE_R_TS               ASM_CONST(0x4000000000000000)
 88 #define HPTE_R_KEY_HI           ASM_CONST(0x3000000000000000)
 89 #define HPTE_R_KEY_BIT4         ASM_CONST(0x2000000000000000)
 90 #define HPTE_R_KEY_BIT3         ASM_CONST(0x1000000000000000)
 91 #define HPTE_R_RPN_SHIFT        12
 92 #define HPTE_R_RPN              ASM_CONST(0x0ffffffffffff000)
 93 #define HPTE_R_RPN_3_0          ASM_CONST(0x01fffffffffff000)
 94 #define HPTE_R_PP               ASM_CONST(0x0000000000000003)
 95 #define HPTE_R_PPP              ASM_CONST(0x8000000000000003)
 96 #define HPTE_R_N                ASM_CONST(0x0000000000000004)
 97 #define HPTE_R_G                ASM_CONST(0x0000000000000008)
 98 #define HPTE_R_M                ASM_CONST(0x0000000000000010)
 99 #define HPTE_R_I                ASM_CONST(0x0000000000000020)
100 #define HPTE_R_W                ASM_CONST(0x0000000000000040)
101 #define HPTE_R_WIMG             ASM_CONST(0x0000000000000078)
102 #define HPTE_R_C                ASM_CONST(0x0000000000000080)
103 #define HPTE_R_R                ASM_CONST(0x0000000000000100)
104 #define HPTE_R_KEY_LO           ASM_CONST(0x0000000000000e00)
105 #define HPTE_R_KEY_BIT2         ASM_CONST(0x0000000000000800)
106 #define HPTE_R_KEY_BIT1         ASM_CONST(0x0000000000000400)
107 #define HPTE_R_KEY_BIT0         ASM_CONST(0x0000000000000200)
108 #define HPTE_R_KEY              (HPTE_R_KEY_LO | HPTE_R_KEY_HI)
109 
110 #define HPTE_V_1TB_SEG          ASM_CONST(0x4000000000000000)
111 #define HPTE_V_VRMA_MASK        ASM_CONST(0x4001ffffff000000)
112 
113 /* Values for PP (assumes Ks=0, Kp=1) */
114 #define PP_RWXX 0       /* Supervisor read/write, User none */
115 #define PP_RWRX 1       /* Supervisor read/write, User read */
116 #define PP_RWRW 2       /* Supervisor read/write, User read/write */
117 #define PP_RXRX 3       /* Supervisor read,       User read */
118 #define PP_RXXX (HPTE_R_PP0 | 2)        /* Supervisor read, user none */
119 
120 /* Fields for tlbiel instruction in architecture 2.06 */
121 #define TLBIEL_INVAL_SEL_MASK   0xc00   /* invalidation selector */
122 #define  TLBIEL_INVAL_PAGE      0x000   /* invalidate a single page */
123 #define  TLBIEL_INVAL_SET_LPID  0x800   /* invalidate a set for current LPID */
124 #define  TLBIEL_INVAL_SET       0xc00   /* invalidate a set for all LPIDs */
125 #define TLBIEL_INVAL_SET_MASK   0xfff000        /* set number to inval. */
126 #define TLBIEL_INVAL_SET_SHIFT  12
127 
128 #define POWER7_TLB_SETS         128     /* # sets in POWER7 TLB */
129 #define POWER8_TLB_SETS         512     /* # sets in POWER8 TLB */
130 #define POWER9_TLB_SETS_HASH    256     /* # sets in POWER9 TLB Hash mode */
131 #define POWER9_TLB_SETS_RADIX   128     /* # sets in POWER9 TLB Radix mode */
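    /*
     * Sketch of assumed usage (mirroring the native hash flush code): a
     * full local TLB flush issues one tlbiel per congruence-class set,
     * e.g. on POWER8:
     *
     *      for (set = 0; set < POWER8_TLB_SETS; set++) {
     *              unsigned long rb = TLBIEL_INVAL_SET |
     *                      (set << TLBIEL_INVAL_SET_SHIFT);
     *              asm volatile("tlbiel %0" : : "r" (rb) : "memory");
     *      }
     */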
132 
133 #ifndef __ASSEMBLY__
134 
135 struct mmu_hash_ops {
136         void            (*hpte_invalidate)(unsigned long slot,
137                                            unsigned long vpn,
138                                            int bpsize, int apsize,
139                                            int ssize, int local);
140         long            (*hpte_updatepp)(unsigned long slot,
141                                          unsigned long newpp,
142                                          unsigned long vpn,
143                                          int bpsize, int apsize,
144                                          int ssize, unsigned long flags);
145         void            (*hpte_updateboltedpp)(unsigned long newpp,
146                                                unsigned long ea,
147                                                int psize, int ssize);
148         long            (*hpte_insert)(unsigned long hpte_group,
149                                        unsigned long vpn,
150                                        unsigned long prpn,
151                                        unsigned long rflags,
152                                        unsigned long vflags,
153                                        int psize, int apsize,
154                                        int ssize);
155         long            (*hpte_remove)(unsigned long hpte_group);
156         int             (*hpte_removebolted)(unsigned long ea,
157                                              int psize, int ssize);
158         void            (*flush_hash_range)(unsigned long number, int local);
159         void            (*hugepage_invalidate)(unsigned long vsid,
160                                                unsigned long addr,
161                                                unsigned char *hpte_slot_array,
162                                                int psize, int ssize, int local);
163         int             (*resize_hpt)(unsigned long shift);
164         /*
165          * Special for kexec.
166          * To be called in real mode with interrupts disabled. No locks are
 167          * taken; as such, concurrent access on pre-POWER5 hardware could
 168          * result in a deadlock.
169          * The linear mapping is destroyed as well.
170          */
171         void            (*hpte_clear_all)(void);
172 };
173 extern struct mmu_hash_ops mmu_hash_ops;
174 
175 struct hash_pte {
176         __be64 v;
177         __be64 r;
178 };
179 
180 extern struct hash_pte *htab_address;
181 extern unsigned long htab_size_bytes;
182 extern unsigned long htab_hash_mask;
183 
184 
185 static inline int shift_to_mmu_psize(unsigned int shift)
186 {
187         int psize;
188 
189         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
190                 if (mmu_psize_defs[psize].shift == shift)
191                         return psize;
192         return -1;
193 }
194 
195 static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
196 {
197         if (mmu_psize_defs[mmu_psize].shift)
198                 return mmu_psize_defs[mmu_psize].shift;
199         BUG();
200 }
201 
202 static inline unsigned int ap_to_shift(unsigned long ap)
203 {
204         int psize;
205 
206         for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
207                 if (mmu_psize_defs[psize].ap == ap)
208                         return mmu_psize_defs[psize].shift;
209         }
210 
211         return -1;
212 }
213 
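    /*
     * Pack the SLB_VSID_L and SLB_VSID_LP bits of mmu_psize_defs[psize].sllp
     * into a contiguous 3-bit L||LP value (L in bit 2, LP in bits 1:0),
     * the form used when building RB values for tlbie(l) (an assumption
     * about the callers; this header only provides the encoding).
     */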
214 static inline unsigned long get_sllp_encoding(int psize)
215 {
216         unsigned long sllp;
217 
218         sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
219                 ((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
220         return sllp;
221 }
222 
223 #endif /* __ASSEMBLY__ */
224 
225 /*
226  * Segment sizes.
227  * These are the values used by hardware in the B field of
228  * SLB entries and the first dword of MMU hashtable entries.
229  * The B field is 2 bits; the values 2 and 3 are unused and reserved.
230  */
231 #define MMU_SEGSIZE_256M        0
232 #define MMU_SEGSIZE_1T          1
233 
234 /*
235  * Encode the page number shift.
236  * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
237  * 12 bits. This enables us to address up to a 76-bit VA.
238  * For the HPT hash from a VA we can ignore the page size bits of the VA, and
239  * for HPTE encoding we ignore up to 23 bits of the VA. So ignoring the lower
240  * 12 bits ensures we work in all cases, including 4k page size.
241  */
242 #define VPN_SHIFT       12
243 
244 /*
245  * HPTE Large Page (LP) details
246  */
247 #define LP_SHIFT        12
248 #define LP_BITS         8
249 #define LP_MASK(i)      ((0xFF >> (i)) << LP_SHIFT)
250 
251 #ifndef __ASSEMBLY__
252 
253 static inline int slb_vsid_shift(int ssize)
254 {
255         if (ssize == MMU_SEGSIZE_256M)
256                 return SLB_VSID_SHIFT;
257         return SLB_VSID_SHIFT_1T;
258 }
259 
260 static inline int segment_shift(int ssize)
261 {
262         if (ssize == MMU_SEGSIZE_256M)
263                 return SID_SHIFT;
264         return SID_SHIFT_1T;
265 }
266 
267 /*
268  * This array is indexed by the LP field of the HPTE second dword.
269  * Since this field may contain some RPN bits, some entries are
270  * replicated so that we get the same value irrespective of RPN.
271  * The top 4 bits are the page size index (MMU_PAGE_*) for the
272  * actual page size, the bottom 4 bits are the base page size.
273  */
274 extern u8 hpte_page_sizes[1 << LP_BITS];
275 
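    /*
     * Decode an HPTE's first (h) and second (l) dwords into a page size
     * in bytes: the actual page size by default, or the base page size
     * when is_base_size is set. Returns 0 if the LP value is not a
     * recognized encoding.
     */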
276 static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
277                                              bool is_base_size)
278 {
279         unsigned int i, lp;
280 
281         if (!(h & HPTE_V_LARGE))
282                 return 1ul << 12;
283 
284         /* Look at the 8 bit LP value */
285         lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
286         i = hpte_page_sizes[lp];
287         if (!i)
288                 return 0;
289         if (!is_base_size)
290                 i >>= 4;
291         return 1ul << mmu_psize_defs[i & 0xf].shift;
292 }
293 
294 static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
295 {
296         return __hpte_page_size(h, l, 0);
297 }
298 
299 static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
300 {
301         return __hpte_page_size(h, l, 1);
302 }
303 
304 /*
305  * The current system page and segment sizes
306  */
307 extern int mmu_kernel_ssize;
308 extern int mmu_highuser_ssize;
309 extern u16 mmu_slb_size;
310 extern unsigned long tce_alloc_start, tce_alloc_end;
311 
312 /*
313  * If the processor supports 64k normal pages but not 64k cache
314  * inhibited pages, we have to be prepared to switch processes
315  * to use 4k pages when they create cache-inhibited mappings.
316  * If this is the case, mmu_ci_restrictions will be set to 1.
317  */
318 extern int mmu_ci_restrictions;
319 
320 /*
321  * This computes the AVPN and B fields of the first dword of a HPTE,
322  * for use when we want to match an existing PTE.  The bottom 7 bits
323  * of the returned value are zero.
324  */
325 static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
326                                              int ssize)
327 {
328         unsigned long v;
329         /*
330          * The AVA field omits the low-order 23 bits of the 78-bit VA.
331          * These bits are not needed in the PTE, because the
332          * low-order b of these bits are part of the byte offset
333          * into the virtual page and, if b < 23, the high-order
334          * 23-b of these bits are always used in selecting the
335          * PTEGs to be searched
336          * PTEGs to be searched.
337         v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
338         v <<= HPTE_V_AVPN_SHIFT;
339         v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
340         return v;
341 }
342 
343 /*
344  * ISA v3.0 defines a new HPTE format, which differs from the old
345  * format in having smaller AVPN and ARPN fields, and the B field
346  * in the second dword instead of the first.
347  */
348 static inline unsigned long hpte_old_to_new_v(unsigned long v)
349 {
350         /* trim AVPN, drop B */
351         return v & HPTE_V_COMMON_BITS;
352 }
353 
354 static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
355 {
356         /* move B field from 1st to 2nd dword, trim ARPN */
357         return (r & ~HPTE_R_3_0_SSIZE_MASK) |
358                 (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
359 }
360 
361 static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
362 {
363         /* insert B field */
364         return (v & HPTE_V_COMMON_BITS) |
365                 ((r & HPTE_R_3_0_SSIZE_MASK) <<
366                  (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
367 }
368 
369 static inline unsigned long hpte_new_to_old_r(unsigned long r)
370 {
371         /* clear out B field */
372         return r & ~HPTE_R_3_0_SSIZE_MASK;
373 }
374 
375 static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
376 {
377         unsigned long hpte_v;
378 
379         hpte_v = be64_to_cpu(hptep->v);
380         if (cpu_has_feature(CPU_FTR_ARCH_300))
381                 hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
382         return hpte_v;
383 }
384 
385 /*
386  * This function sets the AVPN and L fields of the HPTE appropriately
387  * using the base page size and actual page size.
388  */
389 static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
390                                           int actual_psize, int ssize)
391 {
392         unsigned long v;
393         v = hpte_encode_avpn(vpn, base_psize, ssize);
394         if (actual_psize != MMU_PAGE_4K)
395                 v |= HPTE_V_LARGE;
396         return v;
397 }
398 
399 /*
400  * This function sets the ARPN and LP fields of the HPTE appropriately
401  * for the page size. We assume the pa is already "clean", that is,
402  * properly aligned for the requested page size.
403  */
404 static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
405                                           int actual_psize)
406 {
407         /* A 4K page needs no special encoding */
408         if (actual_psize == MMU_PAGE_4K)
409                 return pa & HPTE_R_RPN;
410         else {
411                 unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
412                 unsigned int shift = mmu_psize_defs[actual_psize].shift;
413                 return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
414         }
415 }
416 
417 /*
418  * Build a VPN (the VA shifted right by VPN_SHIFT bits) from VSID, EA and segment size.
419  */
420 static inline unsigned long hpt_vpn(unsigned long ea,
421                                     unsigned long vsid, int ssize)
422 {
423         unsigned long mask;
424         int s_shift = segment_shift(ssize);
425 
426         mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
427         return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
428 }
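    /*
     * Worked example (256M segment, so s_shift = 28): with VPN_SHIFT = 12
     * the expression above reduces to
     *
     *      vpn = (vsid << 16) | ((ea >> 12) & 0xffff);
     *
     * i.e. the low 16 bits of the VPN are ea bits 27..12 (the 4k page
     * index within the segment) and the VSID sits immediately above them.
     */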
429 
430 /*
431  * This hashes a virtual address
432  */
433 static inline unsigned long hpt_hash(unsigned long vpn,
434                                      unsigned int shift, int ssize)
435 {
436         unsigned long mask;
437         unsigned long hash, vsid;
438 
439         /* VPN_SHIFT can be at most 12 */
440         if (ssize == MMU_SEGSIZE_256M) {
441                 mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
442                 hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
443                         ((vpn & mask) >> (shift - VPN_SHIFT));
444         } else {
445                 mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
446                 vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
447                 hash = vsid ^ (vsid << 25) ^
448                         ((vpn & mask) >> (shift - VPN_SHIFT)) ;
449         }
450         return hash & 0x7fffffffffUL;
451 }
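    /*
     * Sketch of assumed usage by the hash fault path: the returned hash is
     * masked down to the installed hash table size to pick a PTEG, e.g.
     *
     *      hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
     *      hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
     *
     * and (~hash & htab_hash_mask) * HPTES_PER_GROUP for the secondary
     * group.
     */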
452 
453 #define HPTE_LOCAL_UPDATE       0x1
454 #define HPTE_NOHPTE_UPDATE      0x2
455 #define HPTE_USE_KERNEL_KEY     0x4
456 
457 long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa,
458                            unsigned long rflags, unsigned long vflags, int psize, int ssize);
459 extern int __hash_page_4K(unsigned long ea, unsigned long access,
460                           unsigned long vsid, pte_t *ptep, unsigned long trap,
461                           unsigned long flags, int ssize, int subpage_prot);
462 extern int __hash_page_64K(unsigned long ea, unsigned long access,
463                            unsigned long vsid, pte_t *ptep, unsigned long trap,
464                            unsigned long flags, int ssize);
465 struct mm_struct;
466 unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
467 extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
468                         unsigned long access, unsigned long trap,
469                         unsigned long flags);
470 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
471                      unsigned long dsisr);
472 void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc);
473 int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr);
474 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
475                      pte_t *ptep, unsigned long trap, unsigned long flags,
476                      int ssize, unsigned int shift, unsigned int mmu_psize);
477 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
478 extern int __hash_page_thp(unsigned long ea, unsigned long access,
479                            unsigned long vsid, pmd_t *pmdp, unsigned long trap,
480                            unsigned long flags, int ssize, unsigned int psize);
481 #else
482 static inline int __hash_page_thp(unsigned long ea, unsigned long access,
483                                   unsigned long vsid, pmd_t *pmdp,
484                                   unsigned long trap, unsigned long flags,
485                                   int ssize, unsigned int psize)
486 {
487         BUG();
488         return -1;
489 }
490 #endif
491 extern void hash_failure_debug(unsigned long ea, unsigned long access,
492                                unsigned long vsid, unsigned long trap,
493                                int ssize, int psize, int lpsize,
494                                unsigned long pte);
495 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
496                              unsigned long pstart, unsigned long prot,
497                              int psize, int ssize);
498 int htab_remove_mapping(unsigned long vstart, unsigned long vend,
499                         int psize, int ssize);
500 extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
501 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
502 
503 extern void hash__setup_new_exec(void);
504 
505 #ifdef CONFIG_PPC_PSERIES
506 void hpte_init_pseries(void);
507 #else
508 static inline void hpte_init_pseries(void) { }
509 #endif
510 
511 extern void hpte_init_native(void);
512 
513 struct slb_entry {
514         u64     esid;
515         u64     vsid;
516 };
517 
518 extern void slb_initialize(void);
519 void slb_flush_and_restore_bolted(void);
520 void slb_flush_all_realmode(void);
521 void __slb_restore_bolted_realmode(void);
522 void slb_restore_bolted_realmode(void);
523 void slb_save_contents(struct slb_entry *slb_ptr);
524 void slb_dump_contents(struct slb_entry *slb_ptr);
525 
526 extern void slb_vmalloc_update(void);
527 void preload_new_slb_context(unsigned long start, unsigned long sp);
528 
529 #ifdef CONFIG_PPC_64S_HASH_MMU
530 void slb_set_size(u16 size);
531 #else
532 static inline void slb_set_size(u16 size) { }
533 #endif
534 
535 #endif /* __ASSEMBLY__ */
536 
537 /*
538  * VSID allocation (256MB segment)
539  *
540  * We first generate a "proto-VSID" (37 bits for 65-bit VA, 40 bits
541  * for 68-bit VA; see the tables below). Proto-VSIDs are generated
542  * from the mmu context id and the effective segment id of the address.
543  *
544  * For user processes the max context id is limited to MAX_USER_CONTEXT;
545  * see get_user_context for more details.
546  * For kernel space, see get_kernel_context.
547  *
548  * The proto-VSIDs are then scrambled into real VSIDs with the
549  * multiplicative hash:
550  *
551  *      VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
552  *
553  * VSID_MULTIPLIER is prime, so in particular it is
554  * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
555  * Because the modulus is 2^n-1 we can compute it efficiently without
556  * a divide or extra multiply (see below). The scramble function gives
557  * robust scattering in the hash table (at least based on some initial
558  * results).
559  *
560  * We use VSID 0 to indicate an invalid VSID. This means we can't use context id
561  * 0, because a context id of 0 and an EA of 0 gives a proto-VSID of 0, which
562  * will produce a VSID of 0.
563  *
564  * We also need to avoid the last segment of the last context, because that
565  * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
566  * because of the modulo operation in vsid scramble.
567  */
568 
569 /*
570  * The maximum VA bits we support as of now is 68. We want a 19-bit
571  * context ID.
572  * Restrictions:
573  * The GPU cannot access beyond 128TB (47-bit effective address). We also
574  * cannot do more than a 20-bit PID.
575  * For P4 and P5, which can only do a 65-bit VA, we restrict CONTEXT_BITS
576  * to 16 bits (i.e., we can only have 2^16 PIDs at the same time).
577  */
578 #define VA_BITS                 68
579 #define CONTEXT_BITS            19
580 #define ESID_BITS               (VA_BITS - (SID_SHIFT + CONTEXT_BITS))
581 #define ESID_BITS_1T            (VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
582 
583 #define ESID_BITS_MASK          ((1 << ESID_BITS) - 1)
584 #define ESID_BITS_1T_MASK       ((1 << ESID_BITS_1T) - 1)
585 
586 /*
587  * Certain configs now support MAX_PHYSMEM of more than 512TB. Hence we need
588  * more than one context for linearly mapping the kernel.
589  * For vmalloc and memmap, we use just one context with 512TB. With a 64-byte
590  * struct page size, we need only 32TB of memmap for 2PB (51 bits, MAX_PHYSMEM_BITS).
591  */
592 #if (H_MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
593 #define MAX_KERNEL_CTX_CNT      (1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
594 #else
595 #define MAX_KERNEL_CTX_CNT      1
596 #endif
597 
598 #define MAX_VMALLOC_CTX_CNT     1
599 #define MAX_IO_CTX_CNT          1
600 #define MAX_VMEMMAP_CTX_CNT     1
601 
602 /*
603  * 256MB segment
604  * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
605  * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
606  * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
607  * context maps 2^49 bytes (512TB).
608  *
609  * We also need to avoid the last segment of the last context, because that
610  * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
611  * because of the modulo operation in vsid scramble.
612  *
613  */
614 #define MAX_USER_CONTEXT        ((ASM_CONST(1) << CONTEXT_BITS) - 2)
615 
616 // The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
617 #define MIN_USER_CONTEXT        (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
618                                  MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
619 
620 /*
621  * For platforms that support only a 65-bit VA, we limit the context bits
622  */
623 #define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)
624 
625 /*
626  * This should be computed such that protovsid * vsid_multiplier
627  * doesn't overflow 64 bits. The vsid_multiplier should also be
628  * co-prime to vsid_modulus. We also need to make sure that the number
629  * of bits in the multiplied result (dividend) is less than twice the
630  * number of protovsid bits for our modulus optimization to work.
631  *
632  * The below table shows the current values used.
633  * |-------+------------+----------------------+------------+--------------------|
634  * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2* proto VSID_BITS |
635  * |-------+------------+----------------------+------------+--------------------|
636  * | 1T    |         24 |                   25 |         49 |                 50 |
637  * |-------+------------+----------------------+------------+--------------------|
638  * | 256MB |         24 |                   37 |         61 |                 74 |
639  * |-------+------------+----------------------+------------+--------------------|
640  *
641  * |-------+------------+----------------------+------------+--------------------|
642  * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2* proto VSID_BITS |
643  * |-------+------------+----------------------+------------+--------------------|
644  * | 1T    |         24 |                   28 |         52 |                 56 |
645  * |-------+------------+----------------------+------------+--------------------|
646  * | 256MB |         24 |                   40 |         64 |                 80 |
647  * |-------+------------+----------------------+------------+--------------------|
648  *
649  */
650 #define VSID_MULTIPLIER_256M    ASM_CONST(12538073)     /* 24-bit prime */
651 #define VSID_BITS_256M          (VA_BITS - SID_SHIFT)
652 #define VSID_BITS_65_256M       (65 - SID_SHIFT)
653 /*
654  * Modular multiplicative inverse of VSID_MULTIPLIER modulo VSID_MODULUS
655  */
656 #define VSID_MULINV_256M        ASM_CONST(665548017062)
657 
658 #define VSID_MULTIPLIER_1T      ASM_CONST(12538073)     /* 24-bit prime */
659 #define VSID_BITS_1T            (VA_BITS - SID_SHIFT_1T)
660 #define VSID_BITS_65_1T         (65 - SID_SHIFT_1T)
661 #define VSID_MULINV_1T          ASM_CONST(209034062)
662 
663 /* 1TB VSID reserved for VRMA */
664 #define VRMA_VSID       0x1ffffffUL
665 #define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
666 
667 /* 4 bits per slice and we have one slice per 1TB */
668 #define SLICE_ARRAY_SIZE        (H_PGTABLE_RANGE >> 41)
669 #define LOW_SLICE_ARRAY_SZ      (BITS_PER_LONG / BITS_PER_BYTE)
670 #define TASK_SLICE_ARRAY_SZ(x)  ((x)->hash_context->slb_addr_limit >> 41)
671 #ifndef __ASSEMBLY__
672 
673 #ifdef CONFIG_PPC_SUBPAGE_PROT
674 /*
675  * For the sub-page protection option, we extend the PGD with one of
676  * these.  Basically we have a 3-level tree, with the top level being
677  * the protptrs array.  To optimize speed and memory consumption when
678  * only addresses < 4GB are being protected, pointers to the first
679  * four pages of sub-page protection words are stored in the low_prot
680  * array.
681  * Each page of sub-page protection words protects 1GB (4 bytes
682  * protects 64k).  For the 3-level tree, each page of pointers then
683  * protects 8TB.
684  */
685 struct subpage_prot_table {
686         unsigned long maxaddr;  /* only addresses < this are protected */
687         unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
688         unsigned int *low_prot[4];
689 };
690 
691 #define SBP_L1_BITS             (PAGE_SHIFT - 2)
692 #define SBP_L2_BITS             (PAGE_SHIFT - 3)
693 #define SBP_L1_COUNT            (1 << SBP_L1_BITS)
694 #define SBP_L2_COUNT            (1 << SBP_L2_BITS)
695 #define SBP_L2_SHIFT            (PAGE_SHIFT + SBP_L1_BITS)
696 #define SBP_L3_SHIFT            (SBP_L2_SHIFT + SBP_L2_BITS)
697 
698 extern void subpage_prot_free(struct mm_struct *mm);
699 #else
700 static inline void subpage_prot_free(struct mm_struct *mm) {}
701 #endif /* CONFIG_PPC_SUBPAGE_PROT */
702 
703 /*
704  * One bit per slice. We have lower slices which cover 256MB segments
705  * up to the 4G range. That gets us 16 low slices. For the rest we track
706  * slices in 1TB sizes.
707  */
708 struct slice_mask {
709         u64 low_slices;
710         DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
711 };
712 
713 struct hash_mm_context {
714         u16 user_psize; /* page size index */
715 
716         /* SLB page size encodings */
717         unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
718         unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
719         unsigned long slb_addr_limit;
720 #ifdef CONFIG_PPC_64K_PAGES
721         struct slice_mask mask_64k;
722 #endif
723         struct slice_mask mask_4k;
724 #ifdef CONFIG_HUGETLB_PAGE
725         struct slice_mask mask_16m;
726         struct slice_mask mask_16g;
727 #endif
728 
729 #ifdef CONFIG_PPC_SUBPAGE_PROT
730         struct subpage_prot_table *spt;
731 #endif /* CONFIG_PPC_SUBPAGE_PROT */
732 };
733 
734 #if 0
735 /*
736  * The code below is equivalent to this function for arguments
737  * < 2^VSID_BITS, which is all this should ever be called
738  * with.  However, gcc is not clever enough to compute the
739  * modulus (2^n-1) without a second multiply.
740  */
741 #define vsid_scramble(protovsid, size) \
742         ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
743 
744 /* simplified form avoiding mod operation */
745 #define vsid_scramble(protovsid, size) \
746         ({                                                               \
747                 unsigned long x;                                         \
748                 x = (protovsid) * VSID_MULTIPLIER_##size;                \
749                 x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
750                 (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
751         })
752 
753 #else /* 1 */
754 static inline unsigned long vsid_scramble(unsigned long protovsid,
755                                   unsigned long vsid_multiplier, int vsid_bits)
756 {
757         unsigned long vsid;
758         unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
759         /*
760          * We have the same multiplier for both 256M and 1T segments now
761          */
762         vsid = protovsid * vsid_multiplier;
763         vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
764         return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
765 }
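    /*
     * A tiny worked example of the 2^n - 1 folding trick above
     * (illustrative only; the real vsid_bits range from 25 to 40): with
     * vsid_bits = 4, so vsid_modulus = 15, and protovsid = 7,
     * vsid_multiplier = 9:
     *
     *      vsid = 7 * 9 = 63
     *      vsid = (63 >> 4) + (63 & 15) = 3 + 15 = 18
     *      (18 + ((18 + 1) >> 4)) & 15 = 19 & 15 = 3
     *
     * which matches 63 % 15 == 3.
     */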
766 
767 #endif /* 1 */
768 
769 /* Returns the segment size indicator for a user address */
770 static inline int user_segment_size(unsigned long addr)
771 {
772         /* Use 1T segments if possible for addresses >= 1T */
773         if (addr >= (1UL << SID_SHIFT_1T))
774                 return mmu_highuser_ssize;
775         return MMU_SEGSIZE_256M;
776 }
777 
778 static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
779                                      int ssize)
780 {
781         unsigned long va_bits = VA_BITS;
782         unsigned long vsid_bits;
783         unsigned long protovsid;
784 
785         /*
786          * Bad address. We return VSID 0 for that.
787          */
788         if ((ea & EA_MASK)  >= H_PGTABLE_RANGE)
789                 return 0;
790 
791         if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
792                 va_bits = 65;
793 
794         if (ssize == MMU_SEGSIZE_256M) {
795                 vsid_bits = va_bits - SID_SHIFT;
796                 protovsid = (context << ESID_BITS) |
797                         ((ea >> SID_SHIFT) & ESID_BITS_MASK);
798                 return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
799         }
800         /* 1T segment */
801         vsid_bits = va_bits - SID_SHIFT_1T;
802         protovsid = (context << ESID_BITS_1T) |
803                 ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
804         return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
805 }
806 
807 /*
808  * For kernel space, we use context ids as
809  * below. Range is 512TB per context.
810  *
811  * 0x00001 -  [ 0xc000000000000000 - 0xc001ffffffffffff]
812  * 0x00002 -  [ 0xc002000000000000 - 0xc003ffffffffffff]
813  * 0x00003 -  [ 0xc004000000000000 - 0xc005ffffffffffff]
814  * 0x00004 -  [ 0xc006000000000000 - 0xc007ffffffffffff]
815  *
816  * vmalloc, IO, vmemmap
817  *
818  * 0x00005 -  [ 0xc008000000000000 - 0xc009ffffffffffff]
819  * 0x00006 -  [ 0xc00a000000000000 - 0xc00bffffffffffff]
820  * 0x00007 -  [ 0xc00c000000000000 - 0xc00dffffffffffff]
821  *
822  */
823 static inline unsigned long get_kernel_context(unsigned long ea)
824 {
825         unsigned long region_id = get_region_id(ea);
826         unsigned long ctx;
827         /*
828          * Depending on the kernel config, the kernel region can have one
829          * context or more.
830          */
831         if (region_id == LINEAR_MAP_REGION_ID) {
832                 /*
833                  * We already verified that ea is not beyond the addr limit.
834                  */
835                 ctx =  1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
836         } else
837                 ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
838         return ctx;
839 }
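    /*
     * Worked example (assuming MAX_EA_BITS_PER_CONTEXT = 49, i.e. 512TB
     * contexts, with EA_MASK stripping the top region bits): for
     * ea = 0xc002000000000000 in the linear map region,
     * (ea & EA_MASK) >> 49 = 1, so ctx = 2, matching the table above.
     */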
840 
841 /*
842  * This is only valid for addresses >= PAGE_OFFSET
843  */
844 static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
845 {
846         unsigned long context;
847 
848         if (!is_kernel_addr(ea))
849                 return 0;
850 
851         context = get_kernel_context(ea);
852         return get_vsid(context, ea, ssize);
853 }
854 
855 unsigned htab_shift_for_mem_size(unsigned long mem_size);
856 
857 enum slb_index {
858         LINEAR_INDEX    = 0, /* Kernel linear map (0xc000000000000000) */
859         KSTACK_INDEX    = 1, /* Kernel stack map */
860 };
861 
862 #define slb_esid_mask(ssize)    \
863         (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
864 
865 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
866                                          enum slb_index index)
867 {
868         return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
869 }
870 
871 static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
872                                            unsigned long flags)
873 {
874         return (vsid << slb_vsid_shift(ssize)) | flags |
875                 ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
876 }
877 
878 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
879                                          unsigned long flags)
880 {
881         return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
882 }
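    /*
     * Sketch of assumed usage (mirroring arch/powerpc/mm/book3s64/slb.c):
     * installing a bolted kernel SLB entry pairs these helpers with an
     * slbmte, with llp taken from mmu_psize_defs[psize].sllp:
     *
     *      unsigned long vdata = mk_vsid_data(ea, ssize, SLB_VSID_KERNEL | llp);
     *      unsigned long edata = mk_esid_data(ea, ssize, KSTACK_INDEX);
     *      asm volatile("slbmte %0,%1" : : "r" (vdata), "r" (edata) : "memory");
     */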
883 
884 #endif /* __ASSEMBLY__ */
885 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
886 
