/* ==== Newer version in this diff: LoongArch KVM TLB handling ==== */

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>

/*
 * kvm_flush_tlb_all() - Flush all root TLB entries for guests.
 *
 * Invalidate all entries, including GVA-->GPA and GPA-->HPA mappings.
 */
void kvm_flush_tlb_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	invtlb_all(INVTLB_ALLGID, 0, 0);
	local_irq_restore(flags);
}

void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	lockdep_assert_irqs_disabled();
	gpa &= (PAGE_MASK << 1);
	invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
}
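/*
 * Editor's note: a minimal user-space sketch (not part of the kernel file
 * above) of why kvm_flush_tlb_gpa() masks with (PAGE_MASK << 1). A TLB
 * entry maps an even/odd pair of pages through EntryLo0/EntryLo1, so the
 * invalidation address is aligned to the pair, i.e. to twice the page
 * size. EX_PAGE_SHIFT and the sample address are assumptions made only
 * for this illustration.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT	14	/* assume 16KB base pages */
#define EX_PAGE_MASK	(~((1UL << EX_PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long gpa = 0x90007000UL;	/* lands in the odd page */

	gpa &= (EX_PAGE_MASK << 1);		/* same step as above */
	printf("pair-aligned gpa: %#lx\n", gpa);	/* -> 0x90000000 */
	return 0;
}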
/* ==== Older version in this diff: MIPS KVM TLB handling ==== */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling; this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

/* Compile the r4kcache helpers without the MT-specific paths. */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, kern_mm);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, user_mm);
}

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the table on a hit; on a miss, i == table size. */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
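/*
 * Editor's sketch (illustrative only; assumes 4KB base pages, an 8-bit
 * ASID, and a simplified field layout): roughly what the
 * TLB_HI_VPN2_HIT()/TLB_HI_ASID_HIT() tests in kvm_mips_guest_tlb_lookup()
 * check. An entry hits when the VPN2 bits match under the entry's page
 * mask, and the entry is either global or carries the same ASID.
 */
struct ex_tlb {
	unsigned long hi;	/* VPN2 | ASID, as in EntryHi */
	unsigned long mask;	/* PageMask: bits excluded from the compare */
	int global;		/* set when both EntryLo G bits are set */
};

static int ex_tlb_hit(const struct ex_tlb *e, unsigned long entryhi)
{
	unsigned long vpn2 = ~0xfffUL & ~e->mask;	/* drop offset bits */

	if ((e->hi & vpn2) != (entryhi & vpn2))
		return 0;
	return e->global || (e->hi & 0xff) == (entryhi & 0xff);
}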
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	BUG_ON(idx >= current_cpu_data.tlbsize);

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
			  bool user, bool kernel)
{
	/*
	 * Initialize idx_user and idx_kernel to work around a bogus
	 * maybe-uninitialized warning when using GCC 6.
	 */
	int idx_user = 0, idx_kernel = 0;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (user)
		idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						  kvm_mips_get_user_asid(vcpu));
	if (kernel)
		idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						    kvm_mips_get_kernel_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_user_asid(vcpu), idx_user);
	if (kernel && idx_kernel >= 0)
		kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			    ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						<< MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	/* Index 0 is a valid slot too, so report any non-negative hit. */
	if (idx >= 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
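/*
 * Editor's sketch of the GuestCtl1 read-modify-write in
 * set_root_gid_to_guest_gid(): the ID field is copied into the RID field.
 * The shifts and widths below mirror the MIPS_GCTL1_* definitions (ID at
 * bits 7:0, RID at bits 23:16) but are restated here purely for
 * illustration.
 */
#define EX_GCTL1_ID_SHIFT	0
#define EX_GCTL1_ID		(0xffUL << EX_GCTL1_ID_SHIFT)
#define EX_GCTL1_RID_SHIFT	16
#define EX_GCTL1_RID		(0xffUL << EX_GCTL1_RID_SHIFT)

static unsigned long ex_copy_id_to_rid(unsigned long guestctl1)
{
	unsigned long id = (guestctl1 & EX_GCTL1_ID) >> EX_GCTL1_ID_SHIFT;

	/* e.g. ex_copy_id_to_rid(0x00000005) == 0x00050005 */
	return (guestctl1 & ~EX_GCTL1_RID) | (id << EX_GCTL1_RID_SHIFT);
}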
/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in the root TLB which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);
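/*
 * Editor's worked example for the EntryLo selection in
 * kvm_vz_guest_tlb_lookup(). The expression
 * "pagemask ^ (pagemask & (pagemask - 1))" isolates the lowest set bit of
 * the (inverted) page mask; shifted right once, it is the GVA bit that
 * picks the even (EntryLo0) or odd (EntryLo1) page of the pair. The 16KB
 * page size and the sample address are assumptions for the demonstration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long gc0_pagemask = 0x6000;	/* PageMask for 16KB pages */
	unsigned long pagemask = ~gc0_pagemask & ~0x1fffUL;
	unsigned long pagemaskbit =
		(pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	unsigned long gva = 0x405234UL;		/* bit 14 set: odd page */

	printf("pagemask    = %#lx\n", pagemask);	/* ...ffff8000 */
	printf("pagemaskbit = %#lx\n", pagemaskbit);	/* 0x4000 */
	printf("EntryLo%d\n", !!(gva & pagemaskbit));	/* EntryLo1 */
	return 0;
}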
/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in the guest TLB irrespective of GuestID.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	}

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	}

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);
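/*
 * Editor's note: both flush loops above stuff a distinct, unreachable
 * EntryHi into every invalidated slot so that no two (invalid) entries can
 * ever match the same address, which on some cores raises a machine check.
 * A plausible shape for such a macro, assuming 4KB pages and an unmapped
 * kernel segment as the base (the real UNIQUE_ENTRYHI definitions live in
 * the MIPS KVM headers):
 */
#define EX_CKSEG1		0xffffffffa0000000UL	/* unmapped, uncached */
#define EX_UNIQUE_ENTRYHI(idx)	(EX_CKSEG1 + ((unsigned long)(idx) << (12 + 1)))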
/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root write of guest TLB entries */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
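/*
 * Editor's usage sketch (ex_save_wired_entries is a hypothetical wrapper,
 * not a function from this file): the save/load pair above operates on a
 * whole range of guest TLB slots and, as their kernel-doc requires, must
 * run with interrupts disabled. The VZ support code uses this pattern to
 * stash the wired entries across a context switch.
 */
static void ex_save_wired_entries(struct kvm_mips_tlb *buf)
{
	unsigned int wired = read_gc0_wired();	/* wired-entry count */
	unsigned long flags;

	local_irq_save(flags);
	kvm_vz_save_guesttlb(buf, 0, wired);
	local_irq_restore(flags);
}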
#endif

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);

/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
	current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
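/*
 * Editor's usage sketch (ex_enter_guest_context is a hypothetical
 * guest-entry path, not part of this file): the pair above is meant to
 * bracket time spent in guest context, so the host mm's cpumask never
 * claims a CPU that is running guest mappings.
 */
static void ex_enter_guest_context(void)
{
	int cpu = smp_processor_id();

	kvm_mips_suspend_mm(cpu);	/* active_mm becomes init_mm */
	/* ... run the guest and handle exits here ... */
	kvm_mips_resume_mm(cpu);	/* active_mm back to current->mm */
}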