/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling: this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}

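/*
 * _kvm_mips_host_tlb_inv() - Invalidate a root TLB entry matching @entryhi.
 *
 * Probes the root TLB for @entryhi and, on a match, overwrites the matching
 * entry with a unique unmapped EntryHi value and zeroed EntryLo values so it
 * can never match again. The caller is expected to have disabled interrupts
 * and set the root GuestID appropriately for the probe.
 *
 * Returns:	The probed TLB index (negative if there was no match).
 */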
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	BUG_ON(idx >= current_cpu_data.tlbsize);

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						<< MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing TLB
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (idx >= 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu: KVM VCPU pointer.
 * @gva: Guest virtual address in a TLB mapped guest segment.
 * @gpa: Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

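	/*
	 * Worked example, assuming a 4 KiB base page size: PageMask reads
	 * back as 0, so pagemask keeps address bits 13 and up, and
	 * pagemaskbit below (the lowest pagemask bit shifted right once) is
	 * bit 12, which selects the even (EntryLo0) or odd (EntryLo1) page
	 * of the pair. The PFN in EntryLo bits 6+ then supplies PA bits 12+,
	 * and gva bits 0..11 supply the in-page offset.
	 */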
	/* Select one of the EntryLo values and interpret the GPA */
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	}

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

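	/*
	 * cvmmemctl2 is non-zero only if INHIBITTS was set in the Octeon III
	 * case above, in which case the machine check can be re-enabled now.
	 */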
	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	}

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf: Buffer to write TLB entries into.
 * @index: Start index.
 * @count: Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

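/*
 * Minimal usage sketch (illustrative only; the eight-entry buffer and the
 * clobbering operation are hypothetical): kvm_vz_save_guesttlb() and
 * kvm_vz_load_guesttlb() below pair up to snapshot a block of guest TLB
 * entries and restore it later, with interrupts disabled by the caller:
 *
 *	struct kvm_mips_tlb buf[8];
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	kvm_vz_save_guesttlb(buf, 0, ARRAY_SIZE(buf));
 *	... something that clobbers guest TLB entries 0..7 ...
 *	kvm_vz_load_guesttlb(buf, 0, ARRAY_SIZE(buf));
 *	local_irq_restore(flags);
 */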
/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf: Buffer to read TLB entries from.
 * @index: Start index.
 * @count: Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

#ifdef CONFIG_CPU_LOONGSON64
/* Invalidate the entire guest VTLB with a single TLBINVF, preserving Index */
void kvm_loongson_clear_guest_vtlb(void)
{
	int idx = read_gc0_index();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	write_gc0_index(0);
	guest_tlbinvf();
	write_gc0_index(idx);

	clear_root_gid();
	/* Also flush the micro ITLB/DTLB via the Loongson diag register */
	set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);

/* Invalidate the guest FTLB, one TLBINVF per FTLB set */
void kvm_loongson_clear_guest_ftlb(void)
{
	int i;
	int idx = read_gc0_index();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	for (i = current_cpu_data.tlbsizevtlb;
	     i < (current_cpu_data.tlbsizevtlb +
		  current_cpu_data.tlbsizeftlbsets);
	     i++) {
		write_gc0_index(i);
		guest_tlbinvf();
	}
	write_gc0_index(idx);

	clear_root_gid();
	/* Also flush the micro ITLB/DTLB via the Loongson diag register */
	set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
#endif