Linux/arch/powerpc/kvm/e500_mmu_host.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched/mm.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
#include <asm/pte-walk.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

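/*
 * Shadow entries in host TLB1 are allocated top-down: shadow index 0 maps
 * to the highest host ESEL, away from the low entries the host uses for
 * its own CAM mappings.
 */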
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
        /*
         * Entries below tlbcam_index are in use by the host for its own
         * mappings; also reserve one entry for the magic page.
         */
        return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /*
                 * The guest is in supervisor mode, so we need to translate
                 * guest supervisor permissions into user permissions. Each
                 * MAS3 user permission bit sits one position above the
                 * corresponding supervisor bit, hence the shift below.
                 */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
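        /*
         * The supervisor permission bits of the shadow entry apply only to
         * the host kernel itself, so they can be granted unconditionally.
         */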
        mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
        return mas3;
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0,
                                     uint32_t lpid)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
#endif
        asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
        /* Must clear mas8 for other host tlbwe's */
        mtspr(SPRN_MAS8, 0);
        isync();
#endif
        local_irq_restore(flags);

        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
        unsigned long flags;
        u32 mas0;
        u32 mas4;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, 0);
        mas4 = mfspr(SPRN_MAS4);
        mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
        asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
        mas0 = mfspr(SPRN_MAS0);
        mtspr(SPRN_MAS4, mas4);
        local_irq_restore(flags);

        return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
        u32 mas0;

        if (tlbsel == 0) {
                mas0 = get_host_mas0(stlbe->mas2);
                __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
        } else {
                __write_host_tlbe(stlbe,
                                  MAS0_TLBSEL(1) |
                                  MAS0_ESEL(to_htlb1_esel(sesel)),
                                  vcpu_e500->vcpu.kvm->arch.lpid);
        }
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                        struct kvm_book3e_206_tlb_entry *gtlbe,
                        struct kvm_book3e_206_tlb_entry *stlbe,
                        int stlbsel, int sesel)
{
        int stid;

        preempt_disable();
        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
        preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
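/*
 * Map the paravirtual shared ("magic") page: a dedicated host TLB1 entry
 * translating the guest's magic page address to the host page that holds
 * vcpu->arch.shared.
 */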
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        unsigned int stid;
        kvm_pfn_t pfn;

        pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        preempt_disable();
        stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

        magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas8 = 0;

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
        preempt_enable();
}
#endif

void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                         int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

        /* Don't bother with unmapped entries */
        if (!(ref->flags & E500_TLB_VALID)) {
                WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
                     "%s: flags %x\n", __func__, ref->flags);
                WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
                int hw_tlb_indx;
                unsigned long flags;

                local_irq_save(flags);
                while (tmp) {
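                        /*
                         * tmp & -tmp isolates the lowest set bit, i.e. the
                         * next host TLB1 slot backing this guest entry;
                         * tmp &= tmp - 1 below clears that bit.
                         */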
                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
                        mtspr(SPRN_MAS0,
                              MAS0_TLBSEL(1) |
                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
                        mtspr(SPRN_MAS1, 0);
                        asm volatile("tlbwe");
                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
                        tmp &= tmp - 1;
                }
                mb();
                vcpu_e500->g2h_tlb1_map[esel] = 0;
                ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
                local_irq_restore(flags);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
                /*
                 * TLB1 entry is backed by 4k pages. This should happen
                 * rarely and is not worth optimizing. Invalidate everything.
                 */
                kvmppc_e500_tlbil_all(vcpu_e500);
                ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
        }

        /*
         * If the TLB entry is still valid then it's a TLB0 entry, and thus
         * backed by at most one host tlbe per shadow pid.
         */
        if (ref->flags & E500_TLB_VALID)
                kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

        /* Mark the TLB entry as no longer backed by the host */
        ref->flags = 0;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
        return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
                                         kvm_pfn_t pfn, unsigned int wimg)
{
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;

        /* Use guest supplied MAS2_G and MAS2_E */
        ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

        /* Mark the page accessed */
        kvm_set_pfn_accessed(pfn);

        if (tlbe_is_writable(gtlbe))
                kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
        if (ref->flags & E500_TLB_VALID) {
                /* FIXME: don't log bogus pfn for TLB1 */
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        if (vcpu_e500->g2h_tlb1_map)
                memset(vcpu_e500->g2h_tlb1_map, 0,
                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
                memset(vcpu_e500->h2g_tlb1_rmap, 0,
                       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int tlbsel;
        int i;

        for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
                for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
                        struct tlbe_ref *ref =
                                &vcpu_e500->gtlb_priv[tlbsel][i].ref;
                        kvmppc_e500_ref_release(ref);
                }
        }
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_e500_tlbil_all(vcpu_e500);
        clear_tlb_privs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
        struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
{
        kvm_pfn_t pfn = ref->pfn;
        u32 pr = vcpu->arch.shared->msr & MSR_PR;

        BUG_ON(!(ref->flags & E500_TLB_VALID));

        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
}

static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
        int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
        struct tlbe_ref *ref)
{
        struct kvm_memory_slot *slot;
        unsigned long pfn = 0; /* silence GCC warning */
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;
        int ret = 0;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu_e500->vcpu.kvm;
        unsigned long tsize_pages = 0;
        pte_t *ptep;
        unsigned int wimg = 0;
        pgd_t *pgdir;
        unsigned long flags;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far.  Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        if (tlbsel == 1) {
                struct vm_area_struct *vma;
                mmap_read_lock(kvm->mm);

                vma = find_vma(kvm->mm, hva);
                if (vma && hva >= vma->vm_start &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * This VMA is a physically contiguous region (e.g.
                         * /dev/mem) that bypasses normal Linux page
                         * management.  Find the overlap between the
                         * vma and the memslot.
                         */

                        unsigned long start, end;
                        unsigned long slot_start, slot_end;

                        pfnmap = 1;

                        start = vma->vm_pgoff;
                        end = start + vma_pages(vma);

                        pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

                        slot_start = pfn - (gfn - slot->base_gfn);
                        slot_end = slot_start + slot->npages;

                        if (start < slot_start)
                                start = slot_start;
                        if (end > slot_end)
                                end = slot_end;

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                        /*
                         * Now find the largest tsize (up to what the guest
                         * requested) that will cover gfn, stay within the
                         * range, and for which gfn and pfn are mutually
                         * aligned.
                         */

                        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                                unsigned long gfn_start, gfn_end;
                                tsize_pages = 1UL << (tsize - 2);

                                gfn_start = gfn & ~(tsize_pages - 1);
                                gfn_end = gfn_start + tsize_pages;
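                                /*
                                 * A tsize mapping covers 2^(tsize + 10)
                                 * bytes, i.e. 1 << (tsize - 2) 4k pages.
                                 * Translate the gfn-aligned window into
                                 * pfn space and check that it stays inside
                                 * the vma/memslot overlap computed above.
                                 */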

                                if (gfn_start + pfn - gfn < start)
                                        continue;
                                if (gfn_end + pfn - gfn > end)
                                        continue;
                                if ((gfn & (tsize_pages - 1)) !=
                                    (pfn & (tsize_pages - 1)))
                                        continue;

                                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                                pfn &= ~(tsize_pages - 1);
                                break;
                        }
                } else if (vma && hva >= vma->vm_start &&
                           is_vm_hugetlb_page(vma)) {
                        unsigned long psize = vma_kernel_pagesize(vma);

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * Take the largest page size that satisfies both
                         * the host and the guest mapping.
                         */
                        tsize = min(__ilog2(psize) - 10, tsize);

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
                }

                mmap_read_unlock(kvm->mm);
        }

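        /*
         * No pfnmap VMA matched (or this is a TLB0 mapping): back the
         * entry with a normal page obtained from the memslot.
         */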
        if (likely(!pfnmap)) {
                tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
                pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_noslot_pfn(pfn)) {
                        if (printk_ratelimit())
                                pr_err("%s: real page not found for gfn %lx\n",
                                       __func__, (long)gfn);
                        return -EINVAL;
                }

                /* Align guest and physical address to page map boundaries */
                pfn &= ~(tsize_pages - 1);
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }

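        /*
         * Recheck the invalidation sequence under mmu_lock: if an MMU
         * notifier invalidation has run since mmu_seq was sampled above,
         * the pfn may be stale and the mapping must be retried.
         */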
        spin_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                ret = -EAGAIN;
                goto out;
        }

        pgdir = vcpu_e500->vcpu.arch.pgdir;
        /*
         * We are only interested in the WIMG bits here, so we don't care
         * about the transparent-hugepage splitting bit. We are holding
         * kvm->mmu_lock, so a notifier invalidate can't run and hence the
         * pfn won't change.
         */
        local_irq_save(flags);
        ptep = find_linux_pte(pgdir, hva, NULL, NULL);
        if (ptep) {
                pte_t pte = READ_ONCE(*ptep);

                if (pte_present(pte)) {
                        wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
                                MAS2_WIMGE_MASK;
                        local_irq_restore(flags);
                } else {
                        local_irq_restore(flags);
                        pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
                                           __func__, (long)gfn, pfn);
                        ret = -EINVAL;
                        goto out;
                }
        }
        kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);

        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);

out:
        spin_unlock(&kvm->mmu_lock);

        /* Drop refcount on page, so that mmu notifiers can clear it */
        kvm_release_pfn_clean(pfn);

        return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
                                struct kvm_book3e_206_tlb_entry *stlbe)
{
        struct kvm_book3e_206_tlb_entry *gtlbe;
        struct tlbe_ref *ref;
        int stlbsel = 0;
        int sesel = 0;
        int r;

        gtlbe = get_entry(vcpu_e500, 0, esel);
        ref = &vcpu_e500->gtlb_priv[0][esel].ref;

        r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                        get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                        gtlbe, 0, stlbe, ref);
        if (r)
                return r;

        write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

        return 0;
}

static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
                                     struct tlbe_ref *ref,
                                     int esel)
{
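        /*
         * Victim selection is a simple round-robin over the shadow TLB1
         * slots. h2g_tlb1_rmap[] stores esel + 1 so that zero means the
         * slot is free; if the chosen slot already backs another guest
         * entry, drop it from that entry's g2h bitmap first.
         */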
        unsigned int sesel = vcpu_e500->host_tlb1_nv++;

        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;

        if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
        }

        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
        vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
        WARN_ON(!(ref->flags & E500_TLB_VALID));

        return sesel;
}

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.
 *
 * For both one-one and one-to-many.
 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
        int sesel;
        int r;

        r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
                                   ref);
        if (r)
                return r;

        /* Use TLB0 when the mapping could only be done with a 4k page */
        if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
                vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
                write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
                return 0;
        }

        /* Otherwise map into TLB1 */
        sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
        write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

        return 0;
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                    unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe_priv *priv;
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        switch (tlbsel) {
        case 0:
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

                /* Triggers after clear_tlb_privs or on initial mapping */
                if (!(priv->ref.flags & E500_TLB_VALID)) {
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                } else {
                        kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                                &priv->ref, eaddr, &stlbe);
                        write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
                }
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;
                kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
                                     esel);
                break;
        }

        default:
                BUG();
                break;
        }
}

#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                enum instruction_fetch_type type, unsigned long *instr)
{
        gva_t geaddr;
        hpa_t addr;
        hfn_t pfn;
        hva_t eaddr;
        u32 mas1, mas2, mas3;
        u64 mas7_mas3;
        struct page *page;
        unsigned int addr_space, psize_shift;
        bool pr;
        unsigned long flags;

        /* Search TLB for guest pc to get the real address */
        geaddr = kvmppc_get_pc(vcpu);

        addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
        asm volatile("tlbsx 0, %[geaddr]\n" : :
                     [geaddr] "r" (geaddr));
        mtspr(SPRN_MAS5, 0);
        mtspr(SPRN_MAS8, 0);
        mas1 = mfspr(SPRN_MAS1);
        mas2 = mfspr(SPRN_MAS2);
        mas3 = mfspr(SPRN_MAS3);
#ifdef CONFIG_64BIT
        mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
#else
        mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
#endif
        local_irq_restore(flags);

        /*
         * If the TLB entry for the guest pc was evicted, return to the
         * guest. A valid entry is likely to be found on the next attempt.
         */
        if (!(mas1 & MAS1_VALID))
                return EMULATE_AGAIN;

        /*
         * Another thread may rewrite the TLB entry in parallel; don't
         * execute from the address if the execute permission is not set.
         */
        pr = vcpu->arch.shared->msr & MSR_PR;
        if (unlikely((pr && !(mas3 & MAS3_UX)) ||
                     (!pr && !(mas3 & MAS3_SX)))) {
                pr_err_ratelimited(
                        "%s: Instruction emulation from guest address %08lx without execute permission\n",
                        __func__, geaddr);
                return EMULATE_AGAIN;
        }

        /*
         * The real address will be mapped by a cacheable, memory coherent,
         * write-back page. Check for mismatches when LRAT is used.
         */
        if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
            unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
                pr_err_ratelimited(
                        "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
                        __func__, geaddr);
                return EMULATE_AGAIN;
        }

        /* Get pfn */
        psize_shift = MAS1_GET_TSIZE(mas1) + 10;
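        /*
         * The page size is 2^psize_shift bytes: take the physical page
         * bits above psize_shift from MAS7_MAS3 and the in-page offset
         * below it from the guest address.
         */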
        addr = (mas7_mas3 & (~0ULL << psize_shift)) |
               (geaddr & ((1ULL << psize_shift) - 1ULL));
        pfn = addr >> PAGE_SHIFT;

        /* Guard against emulation from devices area */
        if (unlikely(!page_is_ram(pfn))) {
                pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
                         __func__, addr);
                return EMULATE_AGAIN;
        }

        /* Map a page and get guest's instruction */
        page = pfn_to_page(pfn);
        eaddr = (unsigned long)kmap_atomic(page);
        *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
        kunmap_atomic((u32 *)eaddr);

        return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                enum instruction_fetch_type type, unsigned long *instr)
{
        return EMULATE_AGAIN;
}
#endif

/************* MMU Notifiers *************/

static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        /*
         * Flush all shadow TLB entries everywhere. This is slow, but
         * we are 100% sure that we catch the page being unmapped.
         */
        return true;
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
        return kvm_e500_mmu_unmap_gfn(kvm, range);
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        /* XXX could be more clever ;) */
        return false;
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        /* XXX could be more clever ;) */
        return false;
}

/*****************************************/

int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        /*
         * This should never happen on real e500 hardware, but is
         * architecturally possible -- e.g. in some weird nested
         * virtualization case.
         */
        if (host_tlb_params[0].entries == 0 ||
            host_tlb_params[1].entries == 0) {
                pr_err("%s: need to know host tlb size\n", __func__);
                return -ENODEV;
        }

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
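        /* TLB1 is fully associative: a single set whose ways are all entries. */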
        host_tlb_params[1].ways = host_tlb_params[1].entries;

        if (!is_power_of_2(host_tlb_params[0].entries) ||
            !is_power_of_2(host_tlb_params[0].ways) ||
            host_tlb_params[0].entries < host_tlb_params[0].ways ||
            host_tlb_params[0].ways == 0) {
                pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
                       __func__, host_tlb_params[0].entries,
                       host_tlb_params[0].ways);
                return -ENODEV;
        }

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;
        vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
                                           sizeof(*vcpu_e500->h2g_tlb1_rmap),
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
                return -ENOMEM;

        return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->h2g_tlb1_rmap);
}
