Linux/arch/loongarch/kvm/vcpu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <linux/entry-kvm.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include <asm/time.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, int_exits),
        STATS_DESC_COUNTER(VCPU, idle_exits),
        STATS_DESC_COUNTER(VCPU, cpucfg_exits),
        STATS_DESC_COUNTER(VCPU, signal_exits),
        STATS_DESC_COUNTER(VCPU, hypercall_exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};

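/*
 * The two objects above follow KVM's binary stats ABI: a header, then the
 * id string, then the descriptor array, then the data block, at the offsets
 * recorded in the header. A minimal userspace sketch for locating the data
 * (illustrative only; error handling is omitted and vcpu_fd is assumed to
 * be an open vCPU file descriptor):
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	struct kvm_stats_header hdr;
 *	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 *	// hdr.num_desc descriptors start at hdr.desc_offset,
 *	// the u64 counter values themselves at hdr.data_offset
 */
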
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
        u32 version;
        u64 steal;
        gpa_t gpa;
        struct kvm_memslots *slots;
        struct kvm_steal_time __user *st;
        struct gfn_to_hva_cache *ghc;

        ghc = &vcpu->arch.st.cache;
        gpa = vcpu->arch.st.guest_addr;
        if (!(gpa & KVM_STEAL_PHYS_VALID))
                return;

        gpa &= KVM_STEAL_PHYS_MASK;
        slots = kvm_memslots(vcpu->kvm);
        if (slots->generation != ghc->generation || gpa != ghc->gpa) {
                if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
                        ghc->gpa = INVALID_GPA;
                        return;
                }
        }

        st = (struct kvm_steal_time __user *)ghc->hva;
        unsafe_get_user(version, &st->version, out);
        if (version & 1)
                version += 1; /* first time write, random junk */

        version += 1;
        unsafe_put_user(version, &st->version, out);
        smp_wmb();

        unsafe_get_user(steal, &st->steal, out);
        steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
        vcpu->arch.st.last_steal = current->sched_info.run_delay;
        unsafe_put_user(steal, &st->steal, out);

        smp_wmb();
        version += 1;
        unsafe_put_user(version, &st->version, out);
out:
        mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}

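/*
 * The version field above implements a seqcount-style protocol: the host
 * makes it odd before touching steal and even again afterwards. A guest-side
 * reader therefore retries until it observes a stable, even version. A
 * minimal sketch, assuming st points at the guest's mapping of the record:
 *
 *	u32 version;
 *	u64 steal;
 *
 *	do {
 *		version = READ_ONCE(st->version);
 *		smp_rmb();
 *		steal = READ_ONCE(st->steal);
 *		smp_rmb();
 *	} while ((version & 1) || version != READ_ONCE(st->version));
 */
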
/*
 * kvm_check_requests - check and handle pending vCPU requests
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 */
static int kvm_check_requests(struct kvm_vcpu *vcpu)
{
        if (!kvm_request_pending(vcpu))
                return RESUME_GUEST;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */

        if (kvm_dirty_ring_check_request(vcpu))
                return RESUME_HOST;

        if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
                kvm_update_stolen_time(vcpu);

        return RESUME_GUEST;
}

static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
{
        lockdep_assert_irqs_disabled();
        if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
                if (vcpu->arch.flush_gpa != INVALID_GPA) {
                        kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
                        vcpu->arch.flush_gpa = INVALID_GPA;
                }
}

/*
 * Check and handle pending signals and vCPU requests, etc.
 * Runs with IRQs and preemption enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest
 *         RESUME_HOST  if we should exit to userspace
 *         < 0 if we should exit to userspace, where the return value
 *         indicates an error
 */
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
{
        int ret;

        /*
         * Check conditions before entering the guest
         */
        ret = xfer_to_guest_mode_handle_work(vcpu);
        if (ret < 0)
                return ret;

        ret = kvm_check_requests(vcpu);

        return ret;
}

/*
 * Called with IRQs enabled.
 *
 * Return: RESUME_GUEST if we should enter the guest, with IRQs now disabled
 *         anything else if we should exit to userspace
 */
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
{
        int ret;

        do {
                ret = kvm_enter_guest_check(vcpu);
                if (ret != RESUME_GUEST)
                        break;

                /*
                 * Deliver the vCPU timer and pending interrupts, recheck
                 * requests and check the VPID before the vCPU enters the guest
                 */
                local_irq_disable();
                kvm_deliver_intr(vcpu);
                kvm_deliver_exception(vcpu);
                /* Make sure the vcpu mode has been written */
                smp_store_mb(vcpu->mode, IN_GUEST_MODE);
                kvm_check_vpid(vcpu);

                /*
                 * Must be called after kvm_check_vpid(), which updates the
                 * CSR.GSTAT value used by kvm_flush_tlb_gpa(); it may also
                 * clear the pending KVM_REQ_TLB_FLUSH_GPA bit
                 */
                kvm_late_check_requests(vcpu);
                vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
                /* Clear KVM_LARCH_SWCSR_LATEST, as CSRs will change when entering the guest */
                vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;

                if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
                        /* make sure the vcpu mode has been written */
                        smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
                        local_irq_enable();
                        ret = -EAGAIN;
                }
        } while (ret != RESUME_GUEST);

        return ret;
}

/*
 * Return 1 for resume guest and "<= 0" for resume host.
 */
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int ret = RESUME_GUEST;
        unsigned long estat = vcpu->arch.host_estat;
        u32 intr = estat & 0x1fff; /* Ignore NMI */
        u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;

        vcpu->mode = OUTSIDE_GUEST_MODE;

        /* Set a default exit reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;

        guest_timing_exit_irqoff();
        guest_state_exit_irqoff();
        local_irq_enable();

        trace_kvm_exit(vcpu, ecode);
        if (ecode) {
                ret = kvm_handle_fault(vcpu, ecode);
        } else {
                WARN(!intr, "vm exiting with suspicious irq\n");
                ++vcpu->stat.int_exits;
        }

        if (ret == RESUME_GUEST)
                ret = kvm_pre_enter_guest(vcpu);

        if (ret != RESUME_GUEST) {
                local_irq_disable();
                return ret;
        }

        guest_timing_enter_irqoff();
        guest_state_enter_irqoff();
        trace_kvm_reenter(vcpu);

        return RESUME_GUEST;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return !!(vcpu->arch.irq_pending) &&
                vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        int ret;

        /* Protect from TOD sync and vcpu_load/put() */
        preempt_disable();
        ret = kvm_pending_timer(vcpu) ||
                kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
        preempt_enable();

        return ret;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
        int i;

        kvm_debug("vCPU Register Dump:\n");
        kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
        kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);

        for (i = 0; i < 32; i += 4) {
                kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
                       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
                       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
        }

        kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
                  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
                  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));

        kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));

        return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                struct kvm_mp_state *mp_state)
{
        *mp_state = vcpu->arch.mp_state;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                struct kvm_mp_state *mp_state)
{
        int ret = 0;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.mp_state = *mp_state;
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
                return -EINVAL;

        if (dbg->control & KVM_GUESTDBG_ENABLE)
                vcpu->guest_debug = dbg->control;
        else
                vcpu->guest_debug = 0;

        return 0;
}

static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
{
        int cpuid;
        struct kvm_phyid_map *map;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (val >= KVM_MAX_PHYID)
                return -EINVAL;

        map = vcpu->kvm->arch.phyid_map;
        cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

        spin_lock(&vcpu->kvm->arch.phyid_map_lock);
        if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
                /* Discard a duplicated CPUID set operation */
                if (cpuid == val) {
                        spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
                        return 0;
                }

                /*
                 * The CPUID was already set before;
                 * forbid changing to a different CPUID at runtime
                 */
                spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
                return -EINVAL;
        }

        if (map->phys_map[val].enabled) {
                /* Discard a duplicated CPUID set operation */
                if (vcpu == map->phys_map[val].vcpu) {
                        spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
                        return 0;
                }

                /*
                 * The new CPUID is already taken by another vCPU;
                 * forbid sharing the same CPUID between different vCPUs
                 */
                spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
                return -EINVAL;
        }

        kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
        map->phys_map[val].enabled      = true;
        map->phys_map[val].vcpu         = vcpu;
        spin_unlock(&vcpu->kvm->arch.phyid_map_lock);

        return 0;
}

static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
{
        int cpuid;
        struct kvm_phyid_map *map;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        map = vcpu->kvm->arch.phyid_map;
        cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

        if (cpuid >= KVM_MAX_PHYID)
                return;

        spin_lock(&vcpu->kvm->arch.phyid_map_lock);
        if (map->phys_map[cpuid].enabled) {
                map->phys_map[cpuid].vcpu = NULL;
                map->phys_map[cpuid].enabled = false;
                kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
        }
        spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
}

struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
{
        struct kvm_phyid_map *map;

        if (cpuid >= KVM_MAX_PHYID)
                return NULL;

        map = kvm->arch.phyid_map;
        if (!map->phys_map[cpuid].enabled)
                return NULL;

        return map->phys_map[cpuid].vcpu;
}

static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
        unsigned long gintc;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (get_gcsr_flag(id) & INVALID_GCSR)
                return -EINVAL;

        if (id == LOONGARCH_CSR_ESTAT) {
                preempt_disable();
                vcpu_load(vcpu);
                /*
                 * Sync pending interrupts into ESTAT so that the interrupt
                 * state is preserved during the VM migration stage
                 */
                kvm_deliver_intr(vcpu);
                vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
                vcpu_put(vcpu);
                preempt_enable();

                /* ESTAT IP0~IP7 come from GINTC */
                gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
                *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
                return 0;
        }

        /*
         * Return the software CSR state, since for a synchronous ioctl the
         * software state is consistent with the hardware state
         */
        *val = kvm_read_sw_gcsr(csr, id);

        return 0;
}

static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
{
        int ret = 0, gintc;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (get_gcsr_flag(id) & INVALID_GCSR)
                return -EINVAL;

        if (id == LOONGARCH_CSR_CPUID)
                return kvm_set_cpuid(vcpu, val);

        if (id == LOONGARCH_CSR_ESTAT) {
                /* Inject ESTAT IP0~IP7 through GINTC */
                gintc = (val >> 2) & 0xff;
                kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);

                gintc = val & ~(0xffUL << 2);
                kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

                return ret;
        }

        kvm_write_sw_gcsr(csr, id, val);

        return ret;
}

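/*
 * Worked example of the ESTAT <-> GINTC split above: IP0~IP7 occupy ESTAT
 * bits 2..9. Setting ESTAT to 0xc (IP0 and IP1 pending) stores
 * (0xc >> 2) & 0xff = 0x3 into the software GINTC and keeps
 * 0xc & ~(0xff << 2) = 0 in the software ESTAT; _kvm_getcsr() then
 * reassembles 0 | (0x3 << 2) = 0xc on the way back out.
 */
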
static int _kvm_get_cpucfg_mask(int id, u64 *v)
{
        if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
                return -EINVAL;

        switch (id) {
        case LOONGARCH_CPUCFG0:
                *v = GENMASK(31, 0);
                return 0;
        case LOONGARCH_CPUCFG1:
                /* CPUCFG1_MSGINT is not supported by KVM */
                *v = GENMASK(25, 0);
                return 0;
        case LOONGARCH_CPUCFG2:
                /* CPUCFG2 features unconditionally supported by KVM */
                *v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
                     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
                     CPUCFG2_LSPW | CPUCFG2_LAM;
                /*
                 * For the ISA extensions listed below, if one is supported
                 * by the host, then it is also supported by KVM.
                 */
                if (cpu_has_lsx)
                        *v |= CPUCFG2_LSX;
                if (cpu_has_lasx)
                        *v |= CPUCFG2_LASX;

                return 0;
        case LOONGARCH_CPUCFG3:
                *v = GENMASK(16, 0);
                return 0;
        case LOONGARCH_CPUCFG4:
        case LOONGARCH_CPUCFG5:
                *v = GENMASK(31, 0);
                return 0;
        case LOONGARCH_CPUCFG16:
                *v = GENMASK(16, 0);
                return 0;
        case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
                *v = GENMASK(30, 0);
                return 0;
        default:
                /*
                 * CPUCFG bits should be zero if reserved by HW or not
                 * supported by KVM.
                 */
                *v = 0;
                return 0;
        }
}

static int kvm_check_cpucfg(int id, u64 val)
{
        int ret;
        u64 mask = 0;

        ret = _kvm_get_cpucfg_mask(id, &mask);
        if (ret)
                return ret;

        if (val & ~mask)
                /* Unsupported features and/or the higher 32 bits should not be set */
                return -EINVAL;

        switch (id) {
        case LOONGARCH_CPUCFG2:
                if (!(val & CPUCFG2_LLFTP))
                        /* Guests must have a constant timer */
                        return -EINVAL;
                if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
                        /* Single and double precision floating point must both be set when FP is enabled */
                        return -EINVAL;
                if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
                        /* LSX architecturally implies FP, but val does not satisfy that */
                        return -EINVAL;
                if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
                        /* LASX architecturally implies LSX and FP, but val does not satisfy that */
                        return -EINVAL;
                return 0;
        default:
                /*
                 * Values for the other CPUCFG IDs are not validated
                 * beyond the mask check above.
                 */
                return 0;
        }
}

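/*
 * Example: the smallest CPUCFG2 value kvm_check_cpucfg() accepts with FP
 * enabled is CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_LLFTP.
 * Adding CPUCFG2_LASX additionally requires CPUCFG2_LSX, which in turn
 * requires FP, mirroring the architectural implication chain
 * LASX -> LSX -> FP.
 */
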
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
                const struct kvm_one_reg *reg, u64 *v)
{
        int id, ret = 0;
        u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

        switch (type) {
        case KVM_REG_LOONGARCH_CSR:
                id = KVM_GET_IOC_CSR_IDX(reg->id);
                ret = _kvm_getcsr(vcpu, id, v);
                break;
        case KVM_REG_LOONGARCH_CPUCFG:
                id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
                if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
                        *v = vcpu->arch.cpucfg[id];
                else
                        ret = -EINVAL;
                break;
        case KVM_REG_LOONGARCH_KVM:
                switch (reg->id) {
                case KVM_REG_LOONGARCH_COUNTER:
                        *v = drdtime() + vcpu->kvm->arch.time_offset;
                        break;
                case KVM_REG_LOONGARCH_DEBUG_INST:
                        *v = INSN_HVCL | KVM_HCALL_SWDBG;
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret = 0;
        u64 v, size = reg->id & KVM_REG_SIZE_MASK;

        switch (size) {
        case KVM_REG_SIZE_U64:
                ret = kvm_get_one_reg(vcpu, reg, &v);
                if (ret)
                        return ret;
                ret = put_user(v, (u64 __user *)(long)reg->addr);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

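/*
 * Userspace reaches kvm_get_reg()/kvm_set_reg() through the
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG vCPU ioctls. A minimal sketch for
 * reading the guest counter (illustrative; assumes an open vcpu_fd):
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	__u64 counter;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_LOONGARCH_COUNTER,
 *		.addr = (__u64)&counter,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
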
static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
                        const struct kvm_one_reg *reg, u64 v)
{
        int id, ret = 0;
        u64 type = reg->id & KVM_REG_LOONGARCH_MASK;

        switch (type) {
        case KVM_REG_LOONGARCH_CSR:
                id = KVM_GET_IOC_CSR_IDX(reg->id);
                ret = _kvm_setcsr(vcpu, id, v);
                break;
        case KVM_REG_LOONGARCH_CPUCFG:
                id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
                ret = kvm_check_cpucfg(id, v);
                if (ret)
                        break;
                vcpu->arch.cpucfg[id] = (u32)v;
                break;
        case KVM_REG_LOONGARCH_KVM:
                switch (reg->id) {
                case KVM_REG_LOONGARCH_COUNTER:
                        /*
                         * The guest timer offset is board-wide, not per-vCPU,
                         * so only set it once, on the first vCPU of an SMP VM
                         */
                        if (vcpu->vcpu_id == 0)
                                vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
                        break;
                case KVM_REG_LOONGARCH_VCPU_RESET:
                        vcpu->arch.st.guest_addr = 0;
                        memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
                        memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret = 0;
        u64 v, size = reg->id & KVM_REG_SIZE_MASK;

        switch (size) {
        case KVM_REG_SIZE_U64:
                ret = get_user(v, (u64 __user *)(long)reg->addr);
                if (ret)
                        return ret;
                break;
        default:
                return -EINVAL;
        }

        return kvm_set_one_reg(vcpu, reg, v);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                regs->gpr[i] = vcpu->arch.gprs[i];

        regs->pc = vcpu->arch.pc;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                vcpu->arch.gprs[i] = regs->gpr[i];

        vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
        vcpu->arch.pc = regs->pc;

        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        /* FPU is enabled by default, will support LSX/LASX later. */
        return -EINVAL;
}

static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case 2: /* LOONGARCH_CPUCFG2 */
                return 0;
        default:
                return -ENXIO;
        }

        return -ENXIO;
}

static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        if (!kvm_pvtime_supported() ||
                        attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
                return -ENXIO;

        return 0;
}

static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        case KVM_LOONGARCH_VCPU_CPUCFG:
                ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
                break;
        case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
                ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
                break;
        default:
                break;
        }

        return ret;
}

static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        int ret = 0;
        uint64_t val;
        uint64_t __user *uaddr = (uint64_t __user *)attr->addr;

        ret = _kvm_get_cpucfg_mask(attr->attr, &val);
        if (ret)
                return ret;

        if (put_user(val, uaddr))
                return -EFAULT;

        return ret;
}

static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        u64 gpa;
        u64 __user *user = (u64 __user *)attr->addr;

        if (!kvm_pvtime_supported() ||
                        attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
                return -ENXIO;

        gpa = vcpu->arch.st.guest_addr;
        if (put_user(gpa, user))
                return -EFAULT;

        return 0;
}

static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        case KVM_LOONGARCH_VCPU_CPUCFG:
                ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
                break;
        case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
                ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
                break;
        default:
                break;
        }

        return ret;
}

static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        return -ENXIO;
}

static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
                                         struct kvm_device_attr *attr)
{
        int idx, ret = 0;
        u64 gpa, __user *user = (u64 __user *)attr->addr;
        struct kvm *kvm = vcpu->kvm;

        if (!kvm_pvtime_supported() ||
                        attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
                return -ENXIO;

        if (get_user(gpa, user))
                return -EFAULT;

        if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
                return -EINVAL;

        if (!(gpa & KVM_STEAL_PHYS_VALID)) {
                vcpu->arch.st.guest_addr = gpa;
                return 0;
        }

        /* Check the address is in a valid memslot */
        idx = srcu_read_lock(&kvm->srcu);
        if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
                ret = -EINVAL;
        srcu_read_unlock(&kvm->srcu, idx);

        if (!ret) {
                vcpu->arch.st.guest_addr = gpa;
                vcpu->arch.st.last_steal = current->sched_info.run_delay;
                kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
        }

        return ret;
}

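/*
 * Userspace configures the PV time record through the device-attr ioctls.
 * A minimal sketch (illustrative; STEAL_TIME_GPA is a hypothetical,
 * suitably aligned guest physical address inside a memslot):
 *
 *	__u64 gpa = STEAL_TIME_GPA | KVM_STEAL_PHYS_VALID;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
 *		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
 *		.addr  = (__u64)&gpa,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
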
static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
                                       struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        case KVM_LOONGARCH_VCPU_CPUCFG:
                ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
                break;
        case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
                ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
                break;
        default:
                break;
        }

        return ret;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        long r;
        struct kvm_device_attr attr;
        void __user *argp = (void __user *)arg;
        struct kvm_vcpu *vcpu = filp->private_data;

        /*
         * Only the software CSR state should be modified here.
         *
         * If any hardware CSR register were modified directly, a
         * vcpu_load/vcpu_put pair would be needed: the hardware CSR
         * registers are owned by the currently loaded vCPU, so after
         * switching to another vCPU they have to be reloaded.
         *
         * When the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
         * bit must be cleared in vcpu->arch.aux_inuse; vcpu_load then checks
         * the flag and reloads the hardware CSR registers from the software
         * state.
         */

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG) {
                        r = kvm_set_reg(vcpu, &reg);
                        vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
                } else
                        r = kvm_get_reg(vcpu, &reg);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
                break;
        }
        default:
                r = -ENOIOCTLCMD;
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        int i = 0;

        fpu->fcc = vcpu->arch.fpu.fcc;
        fpu->fcsr = vcpu->arch.fpu.fcsr;
        for (i = 0; i < NUM_FPU_REGS; i++)
                memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        int i = 0;

        vcpu->arch.fpu.fcc = fpu->fcc;
        vcpu->arch.fpu.fcsr = fpu->fcsr;
        for (i = 0; i < NUM_FPU_REGS; i++)
                memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);

        return 0;
}

/* Enable FPU and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
        preempt_disable();

        /* Enable FPU */
        set_csr_euen(CSR_EUEN_FPEN);

        kvm_restore_fpu(&vcpu->arch.fpu);
        vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

        preempt_enable();
}

#ifdef CONFIG_CPU_HAS_LSX
/* Enable LSX and restore context */
int kvm_own_lsx(struct kvm_vcpu *vcpu)
{
        if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
                return -EINVAL;

        preempt_disable();

        /* Enable LSX for guest */
        set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
        switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
        case KVM_LARCH_FPU:
                /*
                 * Guest FPU state already loaded,
                 * only restore the upper LSX state
                 */
                _restore_lsx_upper(&vcpu->arch.fpu);
                break;
        default:
                /*
                 * Neither FP nor LSX is already active,
                 * restore the full LSX state
                 */
                kvm_restore_lsx(&vcpu->arch.fpu);
                break;
        }

        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
        vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
        preempt_enable();

        return 0;
}
#endif

#ifdef CONFIG_CPU_HAS_LASX
/* Enable LASX and restore context */
int kvm_own_lasx(struct kvm_vcpu *vcpu)
{
        if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
                return -EINVAL;

        preempt_disable();

        set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
        switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
        case KVM_LARCH_LSX:
        case KVM_LARCH_LSX | KVM_LARCH_FPU:
                /* Guest LSX state already loaded, only restore the upper LASX state */
                _restore_lasx_upper(&vcpu->arch.fpu);
                break;
        case KVM_LARCH_FPU:
                /* Guest FP state already loaded, only restore the upper LSX & LASX state */
                _restore_lsx_upper(&vcpu->arch.fpu);
                _restore_lasx_upper(&vcpu->arch.fpu);
                break;
        default:
                /* Neither FP nor LSX is already active, restore the full LASX state */
                kvm_restore_lasx(&vcpu->arch.fpu);
                break;
        }

        trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
        vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
        preempt_enable();

        return 0;
}
#endif

/* Save context and disable FPU */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
        preempt_disable();

        if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
                kvm_save_lasx(&vcpu->arch.fpu);
                vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);

                /* Disable LASX & LSX & FPU */
                clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
        } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
                kvm_save_lsx(&vcpu->arch.fpu);
                vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);

                /* Disable LSX & FPU */
                clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
        } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
                kvm_save_fpu(&vcpu->arch.fpu);
                vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
                trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

                /* Disable FPU */
                clear_csr_euen(CSR_EUEN_FPEN);
        }

        preempt_enable();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        int intr = (int)irq->irq;

        if (intr > 0)
                kvm_queue_irq(vcpu, intr);
        else if (intr < 0)
                kvm_dequeue_irq(vcpu, -intr);
        else {
                kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
                return -EINVAL;
        }

        kvm_vcpu_kick(vcpu);

        return 0;
}

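/*
 * kvm_vcpu_ioctl_interrupt() is driven by the asynchronous KVM_INTERRUPT
 * ioctl: a positive number queues an interrupt, its negation dequeues it.
 * Illustrative sketch (line number 3 is just an example):
 *
 *	struct kvm_interrupt irq = { .irq = 3 };
 *
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// assert line 3
 *	irq.irq = -3;
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// deassert line 3
 */
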
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct kvm_vcpu *vcpu = filp->private_data;

        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;

                kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);

                return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        }

        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        unsigned long timer_hz;
        struct loongarch_csrs *csr;

        vcpu->arch.vpid = 0;
        vcpu->arch.flush_gpa = INVALID_GPA;

        hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

        vcpu->arch.handle_exit = kvm_handle_exit;
        vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
        vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
        if (!vcpu->arch.csr)
                return -ENOMEM;

        /*
         * All KVM exceptions share one exception entry, and the host <->
         * guest switch also switches the ECFG.VS field, so keep the host
         * ECFG.VS value here.
         */
        vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);

        /* Init */
        vcpu->arch.last_sched_cpu = -1;

        /*
         * Initialize guest register state to valid architectural reset state.
         */
        timer_hz = calc_const_freq();
        kvm_init_timer(vcpu, timer_hz);

        /* Set the initial mode for the guest: direct address (DA) mode */
        csr = vcpu->arch.csr;
        kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);

        /* Set cpuid */
        kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
        kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);

        /* Start with no pending virtual guest interrupts */
        csr->csrs[LOONGARCH_CSR_GINTC] = 0;

        return 0;
}

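/*
 * kvm_arch_vcpu_create() runs as part of the KVM_CREATE_VCPU vm ioctl.
 * The usual userspace setup path, for context (illustrative sketch):
 *
 *	int kvm_fd  = open("/dev/kvm", O_RDWR);
 *	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	// vcpu id 0
 */
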
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int cpu;
        struct kvm_context *context;

        hrtimer_cancel(&vcpu->arch.swtimer);
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
        kvm_drop_cpuid(vcpu);
        kfree(vcpu->arch.csr);

        /*
         * If the vCPU is freed and reused as another vCPU, we don't want the
         * matching pointer wrongly hanging around in last_vcpu.
         */
        for_each_possible_cpu(cpu) {
                context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
                if (context->last_vcpu == vcpu)
                        context->last_vcpu = NULL;
        }
}

static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        bool migrated;
        struct kvm_context *context;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        /*
         * Have we migrated to a different CPU?
         * If so, any old guest TLB state may be stale.
         */
        migrated = (vcpu->arch.last_sched_cpu != cpu);

        /*
         * Was this the last vCPU to run on this CPU?
         * If not, any old guest state from this vCPU will have been clobbered.
         */
        context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
        if (migrated || (context->last_vcpu != vcpu))
                vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
        context->last_vcpu = vcpu;

        /* Restore timer state regardless */
        kvm_restore_timer(vcpu);

        /* Control guest page CCA attribute */
        change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
        kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

        /* Don't bother restoring registers multiple times unless necessary */
        if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
                return 0;

        write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);

        /* Restore guest CSR registers */
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
        kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);

        /* Restore Root.GINTC from unused Guest.GINTC register */
        write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);

        /*
         * Clear the LLBit to break any interrupted atomic sequence: this
         * prevents an SC on the next vCPU from falsely succeeding by pairing
         * with an LL executed on the previous vCPU.
         */
        if (vcpu->kvm->created_vcpus > 1)
                set_gcsr_llbctl(CSR_LLBCTL_WCLLB);

        vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;

        return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        /* Restore guest state to registers */
        _kvm_vcpu_load(vcpu, cpu);
        local_irq_restore(flags);
}

static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        struct loongarch_csrs *csr = vcpu->arch.csr;

        kvm_lose_fpu(vcpu);

        /*
         * Update the software CSR state from hardware if it is stale: most
         * CSR registers are unchanged across a process context switch, except
         * for registers such as the remaining timer tick value and the
         * injected interrupt state.
         */
        if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
                goto out;

        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
        kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);

        vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;

out:
        kvm_save_timer(vcpu);
        /* Save Root.GINTC into unused Guest.GINTC register */
        csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();

        return 0;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        int cpu;
        unsigned long flags;

        local_irq_save(flags);
        cpu = smp_processor_id();
        vcpu->arch.last_sched_cpu = cpu;

        /* Save guest state in registers */
        _kvm_vcpu_put(vcpu, cpu);
        local_irq_restore(flags);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int r = -EINTR;
        struct kvm_run *run = vcpu->run;

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvm_complete_mmio_read(vcpu, run);
                vcpu->mmio_needed = 0;
        }

        if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
                if (!run->iocsr_io.is_write)
                        kvm_complete_iocsr_read(vcpu, run);
        }

        if (!vcpu->wants_to_run)
                return r;

        /* Clear exit_reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;
        lose_fpu(1);
        vcpu_load(vcpu);
        kvm_sigset_activate(vcpu);
        r = kvm_pre_enter_guest(vcpu);
        if (r != RESUME_GUEST)
                goto out;

        guest_timing_enter_irqoff();
        guest_state_enter_irqoff();
        trace_kvm_enter(vcpu);
        r = kvm_loongarch_ops->enter_guest(run, vcpu);

        trace_kvm_out(vcpu);
        /*
         * The guest exit was already recorded in kvm_handle_exit(); the
         * return value must not be RESUME_GUEST here
         */
        local_irq_enable();
out:
        kvm_sigset_deactivate(vcpu);
        vcpu_put(vcpu);

        return r;
}
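
/*
 * kvm_arch_vcpu_ioctl_run() is the backend of the KVM_RUN ioctl. A typical
 * userspace loop, for context (illustrative sketch; run points at the
 * mmap()ed kvm_run structure of vcpu_fd, and handle_mmio() is a
 * hypothetical handler):
 *
 *	while (ioctl(vcpu_fd, KVM_RUN, NULL) == 0) {
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_MMIO:
 *			handle_mmio(run);
 *			break;
 *		}
 *	}
 */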