/* arch/loongarch/kvm/tlb.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>

/*
 * kvm_flush_tlb_all() - Flush all root TLB entries for guests.
 *
 * Invalidate all entries including GVA-->GPA and GPA-->HPA mappings.
 */
void kvm_flush_tlb_all(void)
{
        unsigned long flags;

        local_irq_save(flags);
        invtlb_all(INVTLB_ALLGID, 0, 0);
        local_irq_restore(flags);
}

void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
{
        lockdep_assert_irqs_disabled();
        gpa &= (PAGE_MASK << 1);
        invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
}
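/*
 * A minimal usage sketch, not part of the kernel source: how a caller might
 * drop the mapping for one guest physical page with the helpers above.
 * kvm_flush_tlb_gpa() asserts that interrupts are already disabled, so this
 * hypothetical wrapper turns them off around the call. The function name
 * example_flush_one_gpa() is an illustrative assumption, not kernel API.
 */
static void __maybe_unused example_flush_one_gpa(struct kvm_vcpu *vcpu,
                                                 unsigned long gpa)
{
        unsigned long flags;

        local_irq_save(flags);          /* satisfy lockdep_assert_irqs_disabled() */
        kvm_flush_tlb_gpa(vcpu, gpa);   /* invalidates the even/odd page pair for this GID */
        local_irq_restore(flags);
}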
/*
 * ---------------------------------------------------------------------
 * arch/mips/kvm/tlb.c (older kernel) -- the MIPS KVM TLB implementation
 * shown as the comparison column of this side-by-side diff.
 * ---------------------------------------------------------------------
 */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB        0
#define KVM_GUEST_SP_TLB        1

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, kern_mm);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, user_mm);
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
        unsigned long flags;

        local_irq_save(flags);

        kvm_info("HOST TLBs:\n");
        dump_tlb_regs();
        pr_info("\n");
        dump_tlb_all();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        kvm_info("Guest TLBs:\n");
        kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                kvm_info("TLB%c%3d Hi 0x%08lx ",
                         (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
                         ? ' ' : '*',
                         i, tlb.tlb_hi);
                kvm_info("Lo0=0x%09llx %c%c attr %lx ",
                         (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
                         (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
                         (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
                         (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
                kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
                         (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
                         (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
                         (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
                         (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
                         tlb.tlb_mask);
        }
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
                    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
                        index = i;
                        break;
                }
        }

        /*
         * Only dereference the entry on a hit: on a miss, i equals
         * KVM_MIPS_GUEST_TLB_SIZE and tlb[i] would be an out-of-bounds read.
         */
        if (index >= 0)
                kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                          __func__, entryhi, index,
                          tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);
        else
                kvm_debug("%s: entryhi: %#lx, index: %d (no match)\n",
                          __func__, entryhi, index);

        return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
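/*
 * A minimal probe sketch, not part of the kernel source: the lookup above
 * keys on an EntryHi value, i.e. the VPN2 bits of a guest virtual address
 * combined with the current guest ASID. Mirroring how other arch/mips/kvm
 * code builds that key, a hypothetical resolver could look like this;
 * example_probe_guest_va() itself is an illustrative assumption.
 */
static int __maybe_unused example_probe_guest_va(struct kvm_vcpu *vcpu,
                                                 unsigned long gva)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long entryhi;

        /* VPN2 of the address plus the ASID from the guest's EntryHi */
        entryhi = (gva & VPN2_MASK) |
                  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

        /* Returns the matching guest TLB index, or -1 on a miss */
        return kvm_mips_guest_tlb_lookup(vcpu, entryhi);
}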
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
        int idx;

        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx >= 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        return idx;
}

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                          bool user, bool kernel)
{
        /*
         * Initialize idx_user and idx_kernel to workaround bogus
         * maybe-initialized warning when using GCC 6.
         */
        int idx_user = 0, idx_kernel = 0;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (user)
                idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                                  kvm_mips_get_user_asid(vcpu));
        if (kernel)
                idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                                    kvm_mips_get_kernel_asid(vcpu));

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);

        if (user && idx_user >= 0)
                kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                          kvm_mips_get_user_asid(vcpu), idx_user);
        if (kernel && idx_kernel >= 0)
                kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                          kvm_mips_get_kernel_asid(vcpu), idx_kernel);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu: The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
        cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
        current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);

/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu: The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
        cpumask_set_cpu(cpu, mm_cpumask(current->mm));
        current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
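/*
 * A pairing sketch, not part of the kernel source: per the comments above,
 * kvm_mips_suspend_mm() and kvm_mips_resume_mm() bracket time spent in guest
 * context. The real call sites live in the MIPS KVM entry/exit paths; this
 * hypothetical example_guest_section() only shows the intended shape, and
 * assumes it runs with preemption disabled so the CPU number stays stable.
 */
static void __maybe_unused example_guest_section(void)
{
        int cpu = smp_processor_id();   /* assumes preemption is disabled */

        kvm_mips_suspend_mm(cpu);       /* park active_mm on init_mm */

        /* ... enter the guest and run with guest mappings live ... */

        kvm_mips_resume_mm(cpu);        /* restore current->mm before any context switch */
}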