/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

        if (cpu_has_guestid)
                return 0;
        else
                return cpu_asid(smp_processor_id(), gpa_mm);
}

static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
        int idx;

        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        BUG_ON(idx >= current_cpu_data.tlbsize);

        if (idx >= 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        return idx;
}

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
        if (cpu_has_guestid) {
                clear_c0_guestctl1(MIPS_GCTL1_RID);
                mtc0_tlbw_hazard();
        }
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
        unsigned int guestctl1;

        if (cpu_has_guestid) {
                back_to_back_c0_hazard();
                guestctl1 = read_c0_guestctl1();
                guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
                        ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
                                        << MIPS_GCTL1_RID_SHIFT;
                write_c0_guestctl1(guestctl1);
                mtc0_tlbw_hazard();
        }
}

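/*
 * Example (illustrative, not part of the original file): assuming the usual
 * GuestCtl1 layout from <asm/mipsregs.h>, with the ID field at bits 7:0
 * (MIPS_GCTL1_ID_SHIFT == 0) and the RID field at bits 23:16
 * (MIPS_GCTL1_RID_SHIFT == 16), the update in set_root_gid_to_guest_gid()
 * copies ID into RID like this:
 *
 *      guestctl1              = 0x00000005   // current guest has GuestID 5
 *      guestctl1 & ~RID mask  = 0x00000005   // clear the old root GuestID
 *      (guestctl1 & ID) >> 0  = 0x00000005   // extract ID
 *      ... << 16              = 0x00050000   // move it into the RID field
 *      result                 = 0x00050005   // root now uses GuestID 5 too
 *
 * so subsequent root TLB probes and writes operate on the guest's GPA->RPA
 * entries rather than on normal root mappings.
 */
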
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);
        htw_stop();

        /* Set root GuestID for root probe and write of guest TLB entry */
        set_root_gid_to_guest_gid();

        old_entryhi = read_c0_entryhi();

        idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                     kvm_mips_get_root_asid(vcpu));

        write_c0_entryhi(old_entryhi);
        clear_root_gid();
        mtc0_tlbw_hazard();

        htw_start();
        local_irq_restore(flags);

        /*
         * We don't want to get reserved instruction exceptions for missing tlb
         * entries.
         */
        if (cpu_has_vtag_icache)
                flush_icache_all();

        if (idx > 0)
                kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_root_asid(vcpu), idx);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

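/*
 * Example (illustrative): the probe key passed to _kvm_mips_host_tlb_inv()
 * above is an EntryHi value built from the VPN2 of the address plus the root
 * ASID of the GPA address space. Assuming 4 KiB base pages with
 * VPN2_MASK == 0xffffe000 and a root ASID of 0x7:
 *
 *      va             = 0x7fff2345
 *      va & VPN2_MASK = 0x7fff2000
 *      | root ASID    = 0x7fff2007
 *
 * On cores with GuestID support kvm_mips_get_root_asid() returns 0, and the
 * entry is matched via the root GuestID set up in set_root_gid_to_guest_gid()
 * instead.
 */
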
/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
                            unsigned long *gpa)
{
        unsigned long o_entryhi, o_entrylo[2], o_pagemask;
        unsigned int o_index;
        unsigned long entrylo[2], pagemask, pagemaskbit, pa;
        unsigned long flags;
        int index;

        /* Probe the guest TLB for a mapping */
        local_irq_save(flags);
        /* Set root GuestID for root probe of guest TLB entry */
        htw_stop();
        set_root_gid_to_guest_gid();

        o_entryhi = read_gc0_entryhi();
        o_index = read_gc0_index();

        write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
        mtc0_tlbw_hazard();
        guest_tlb_probe();
        tlb_probe_hazard();

        index = read_gc0_index();
        if (index < 0) {
                /* No match, fail */
                write_gc0_entryhi(o_entryhi);
                write_gc0_index(o_index);

                clear_root_gid();
                htw_start();
                local_irq_restore(flags);
                return -EFAULT;
        }

        /* Match! read the TLB entry */
        o_entrylo[0] = read_gc0_entrylo0();
        o_entrylo[1] = read_gc0_entrylo1();
        o_pagemask = read_gc0_pagemask();

        mtc0_tlbr_hazard();
        guest_tlb_read();
        tlb_read_hazard();

        entrylo[0] = read_gc0_entrylo0();
        entrylo[1] = read_gc0_entrylo1();
        pagemask = ~read_gc0_pagemask() & ~0x1fffl;

        write_gc0_entryhi(o_entryhi);
        write_gc0_index(o_index);
        write_gc0_entrylo0(o_entrylo[0]);
        write_gc0_entrylo1(o_entrylo[1]);
        write_gc0_pagemask(o_pagemask);

        clear_root_gid();
        htw_start();
        local_irq_restore(flags);

        /* Select one of the EntryLo values and interpret the GPA */
        pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
        pa = entrylo[!!(gva & pagemaskbit)];

        /*
         * TLB entry may have become invalid since TLB probe if physical FTLB
         * entries are shared between threads (e.g. I6400).
         */
        if (!(pa & ENTRYLO_V))
                return -EFAULT;

        /*
         * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
         * split with XI/RI in the middle.
         */
        pa = (pa << 6) & ~0xfffl;
        pa |= gva & ~(pagemask | pagemaskbit);

        *gpa = pa;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);

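/*
 * Worked example (illustrative) of the GPA composition above, for a 4 KiB
 * guest page pair (guest PageMask == 0):
 *
 *      pagemask    = ~0 & ~0x1fff     -> low 13 bits clear
 *      pagemaskbit = 0x1000           -> bit 12 selects the even/odd half
 *      gva         = 0x00402abc       -> bit 12 clear, so entrylo[0] is used
 *      entrylo[0]  = (0x81234 << 6) | ENTRYLO_V | ...   (PFN 0x81234, valid)
 *      pa          = (entrylo[0] << 6) & ~0xfff       = 0x81234000
 *      pa         |= gva & ~(pagemask | pagemaskbit)  = 0x81234abc
 *
 * The numbers are made up for illustration; only the shifts and masks come
 * from the code above.
 */
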
/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 *					     guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
        unsigned long flags;
        unsigned long old_entryhi, old_pagemask, old_guestctl1;
        int entry;

        if (WARN_ON(!cpu_has_guestid))
                return;

        local_irq_save(flags);
        htw_stop();

        /* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        old_guestctl1 = read_c0_guestctl1();

        /*
         * Invalidate guest entries in root TLB while leaving root entries
         * intact when possible.
         */
        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_read();
                tlb_read_hazard();

                /* Don't invalidate non-guest (RVA) mappings in the root TLB */
                if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
                        continue;

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                write_c0_guestctl1(0);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        write_c0_guestctl1(old_guestctl1);
        tlbw_use_hazard();

        htw_start();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
        unsigned long flags;
        unsigned long old_index;
        unsigned long old_entryhi;
        unsigned long old_entrylo[2];
        unsigned long old_pagemask;
        int entry;
        u64 cvmmemctl2 = 0;

        local_irq_save(flags);

        /* Preserve all clobbered guest registers */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo[0] = read_gc0_entrylo0();
        old_entrylo[1] = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        switch (current_cpu_type()) {
        case CPU_CAVIUM_OCTEON3:
                /* Inhibit machine check due to multiple matching TLB entries */
                cvmmemctl2 = read_c0_cvmmemctl2();
                cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
                write_c0_cvmmemctl2(cvmmemctl2);
                break;
        }

        /* Invalidate guest entries in guest TLB */
        write_gc0_entrylo0(0);
        write_gc0_entrylo1(0);
        write_gc0_pagemask(0);
        for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
                /* Make sure all entries differ. */
                write_gc0_index(entry);
                write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                guest_tlb_write_indexed();
        }

        if (cvmmemctl2) {
                cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
                write_c0_cvmmemctl2(cvmmemctl2);
        }

        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo[0]);
        write_gc0_entrylo1(old_entrylo[1]);
        write_gc0_pagemask(old_pagemask);
        tlbw_use_hazard();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

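/*
 * Note on the "make sure all entries differ" pattern used by both flush
 * routines above: each invalidated slot is given a distinct dummy EntryHi
 * (UNIQUE_ENTRYHI() / UNIQUE_GUEST_ENTRYHI()) so that no two TLB entries can
 * match the same address afterwards. As an illustrative sketch, assuming the
 * dummy VPNs are spaced one even/odd page pair apart:
 *
 *      entry 0 -> dummy base + 0 * 2 * PAGE_SIZE
 *      entry 1 -> dummy base + 1 * 2 * PAGE_SIZE
 *      entry 2 -> dummy base + 2 * 2 * PAGE_SIZE
 *
 * Writing the same EntryHi everywhere could instead create duplicate matches,
 * which some cores (e.g. the Octeon III case handled above) treat as a
 * machine check.
 */
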
/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
                          unsigned int count)
{
        unsigned int end = index + count;
        unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
        unsigned int guestctl1 = 0;
        int old_index, i;

        /* Save registers we're about to clobber */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo0 = read_gc0_entrylo0();
        old_entrylo1 = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        /* Set root GuestID for root probe */
        htw_stop();
        set_root_gid_to_guest_gid();
        if (cpu_has_guestid)
                guestctl1 = read_c0_guestctl1();

        /* Read each entry from guest TLB */
        for (i = index; i < end; ++i, ++buf) {
                write_gc0_index(i);

                mtc0_tlbr_hazard();
                guest_tlb_read();
                tlb_read_hazard();

                if (cpu_has_guestid &&
                    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
                        /* Entry invalid or belongs to another guest */
                        buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
                        buf->tlb_lo[0] = 0;
                        buf->tlb_lo[1] = 0;
                        buf->tlb_mask = 0;
                } else {
                        /* Entry belongs to the right guest */
                        buf->tlb_hi = read_gc0_entryhi();
                        buf->tlb_lo[0] = read_gc0_entrylo0();
                        buf->tlb_lo[1] = read_gc0_entrylo1();
                        buf->tlb_mask = read_gc0_pagemask();
                }
        }

        /* Clear root GuestID again */
        clear_root_gid();
        htw_start();

        /* Restore clobbered registers */
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo0);
        write_gc0_entrylo1(old_entrylo1);
        write_gc0_pagemask(old_pagemask);

        tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
                          unsigned int count)
{
        unsigned int end = index + count;
        unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
        int old_index, i;

        /* Save registers we're about to clobber */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo0 = read_gc0_entrylo0();
        old_entrylo1 = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        /* Set root GuestID for root probe */
        htw_stop();
        set_root_gid_to_guest_gid();

        /* Write each entry to guest TLB */
        for (i = index; i < end; ++i, ++buf) {
                write_gc0_index(i);
                write_gc0_entryhi(buf->tlb_hi);
                write_gc0_entrylo0(buf->tlb_lo[0]);
                write_gc0_entrylo1(buf->tlb_lo[1]);
                write_gc0_pagemask(buf->tlb_mask);

                mtc0_tlbw_hazard();
                guest_tlb_write_indexed();
        }

        /* Clear root GuestID again */
        clear_root_gid();
        htw_start();

        /* Restore clobbered registers */
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo0);
        write_gc0_entrylo1(old_entrylo1);
        write_gc0_pagemask(old_pagemask);

        tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

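/*
 * Usage sketch (illustrative, not a call site from this file): a VZ hypervisor
 * can use the pair above to preserve the guest TLB across a switch to another
 * guest. Per the comments above, interrupts must be disabled around both
 * calls, and the buffer must hold current_cpu_data.guest.tlbsize entries
 * (the fixed size of 64 below is made up for the example):
 *
 *      struct kvm_mips_tlb buf[64];
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      kvm_vz_save_guesttlb(buf, 0, current_cpu_data.guest.tlbsize);
 *      ...     // run another guest, then come back to this one
 *      kvm_vz_load_guesttlb(buf, 0, current_cpu_data.guest.tlbsize);
 *      local_irq_restore(flags);
 */
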
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void)
{
        int idx = read_gc0_index();

        /* Set root GuestID for root probe and write of guest TLB entry */
        set_root_gid_to_guest_gid();

        write_gc0_index(0);
        guest_tlbinvf();
        write_gc0_index(idx);

        clear_root_gid();
        set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);

void kvm_loongson_clear_guest_ftlb(void)
{
        int i;
        int idx = read_gc0_index();

        /* Set root GuestID for root probe and write of guest TLB entry */
        set_root_gid_to_guest_gid();

        for (i = current_cpu_data.tlbsizevtlb;
             i < (current_cpu_data.tlbsizevtlb +
                  current_cpu_data.tlbsizeftlbsets);
             i++) {
                write_gc0_index(i);
                guest_tlbinvf();
        }
        write_gc0_index(idx);

        clear_root_gid();
        set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
#endif
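
/*
 * Index layout assumed by kvm_loongson_clear_guest_ftlb() above (illustrative
 * numbers): the guest VTLB occupies indices 0 .. tlbsizevtlb - 1 and the FTLB
 * sets follow immediately after, so with, say, tlbsizevtlb == 64 and
 * tlbsizeftlbsets == 8 the loop issues guest_tlbinvf() with the index set to
 * 64, 65, ... 71, one invalidate per FTLB set, while
 * kvm_loongson_clear_guest_vtlb() covers the VTLB with a single invalidate at
 * index 0.
 */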