Linux/arch/loongarch/kvm/exit.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

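/*
 * Emulate the CPUCFG instruction: return the cached guest CPUCFG value for
 * indexes below KVM_MAX_CPUCFG_REGS, the KVM signature and feature bits for
 * the KVM-specific index range, and 0 for any undefined index.
 */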
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
        int rd, rj;
        unsigned int index, ret;

        if (inst.reg2_format.opcode != cpucfg_op)
                return EMULATE_FAIL;

        rd = inst.reg2_format.rd;
        rj = inst.reg2_format.rj;
        ++vcpu->stat.cpucfg_exits;
        index = vcpu->arch.gprs[rj];

        /*
         * Per the LoongArch Reference Manual, section 2.2.10.5, reads of
         * an undefined CPUCFG index return 0.
         *
         * Disable preemption since the hardware GCSR is accessed.
         */
        preempt_disable();
        switch (index) {
        case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
                vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
                break;
        case CPUCFG_KVM_SIG:
                /* CPUCFG emulation between 0x40000000 -- 0x400000ff */
                vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
                break;
        case CPUCFG_KVM_FEATURE:
                ret = KVM_FEATURE_IPI;
                if (kvm_pvtime_supported())
                        ret |= KVM_FEATURE_STEAL_TIME;
                vcpu->arch.gprs[rd] = ret;
                break;
        default:
                vcpu->arch.gprs[rd] = 0;
                break;
        }
        preempt_enable();

        return EMULATE_DONE;
}

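/*
 * Read a software-emulated guest CSR. Only CSRs flagged SW_GCSR are backed
 * by the software CSR array; any other CSR ID is unsupported and reads as 0.
 */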
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
        unsigned long val = 0;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        /*
         * Per the LoongArch Reference Manual Volume 1, section 4.2.1,
         * reads of an undefined CSR ID return 0.
         */
        if (get_gcsr_flag(csrid) & SW_GCSR)
                val = kvm_read_sw_gcsr(csr, csrid);
        else
                pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

        return val;
}

static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
        unsigned long old = 0;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (get_gcsr_flag(csrid) & SW_GCSR) {
                old = kvm_read_sw_gcsr(csr, csrid);
                kvm_write_sw_gcsr(csr, csrid, val);
        } else {
                pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
        }

        return old;
}

static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
                                unsigned long csr_mask, unsigned long val)
{
        unsigned long old = 0;
        struct loongarch_csrs *csr = vcpu->arch.csr;

        if (get_gcsr_flag(csrid) & SW_GCSR) {
                old = kvm_read_sw_gcsr(csr, csrid);
                val = (old & ~csr_mask) | (val & csr_mask);
                kvm_write_sw_gcsr(csr, csrid, val);
                old = old & csr_mask;
        } else {
                pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);
        }

        return old;
}

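/*
 * Decode and emulate a trapped CSR instruction (csrrd/csrwr/csrxchg),
 * dispatching on the rj field as described below.
 */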
static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
        unsigned int rd, rj, csrid;
        unsigned long csr_mask, val = 0;

        /*
         * The rj field selects the CSR operation:
         * rj == 0 means csrrd,
         * rj == 1 means csrwr,
         * any other rj means csrxchg (rj names the mask register).
         */
        rd = inst.reg2csr_format.rd;
        rj = inst.reg2csr_format.rj;
        csrid = inst.reg2csr_format.csr;

        /* Process CSR ops */
        switch (rj) {
        case 0: /* process csrrd */
                val = kvm_emu_read_csr(vcpu, csrid);
                vcpu->arch.gprs[rd] = val;
                break;
        case 1: /* process csrwr */
                val = vcpu->arch.gprs[rd];
                val = kvm_emu_write_csr(vcpu, csrid, val);
                vcpu->arch.gprs[rd] = val;
                break;
        default: /* process csrxchg */
                val = vcpu->arch.gprs[rd];
                csr_mask = vcpu->arch.gprs[rj];
                val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
                vcpu->arch.gprs[rd] = val;
        }

        return EMULATE_DONE;
}

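/*
 * Decode a trapped IOCSR access and fill in vcpu->run->iocsr_io so that
 * userspace can complete the access; returns EMULATE_DO_IOCSR on success
 * or EMULATE_FAIL for an unrecognized opcode.
 */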
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int ret;
        unsigned long val;
        u32 addr, rd, rj, opcode;

        /*
         * Each IOCSR access width and direction has its own opcode
         */
        rd = inst.reg2_format.rd;
        rj = inst.reg2_format.rj;
        opcode = inst.reg2_format.opcode;
        addr = vcpu->arch.gprs[rj];
        ret = EMULATE_DO_IOCSR;
        run->iocsr_io.phys_addr = addr;
        run->iocsr_io.is_write = 0;

        /* LoongArch is little-endian */
        switch (opcode) {
        case iocsrrdb_op:
                run->iocsr_io.len = 1;
                break;
        case iocsrrdh_op:
                run->iocsr_io.len = 2;
                break;
        case iocsrrdw_op:
                run->iocsr_io.len = 4;
                break;
        case iocsrrdd_op:
                run->iocsr_io.len = 8;
                break;
        case iocsrwrb_op:
                run->iocsr_io.len = 1;
                run->iocsr_io.is_write = 1;
                break;
        case iocsrwrh_op:
                run->iocsr_io.len = 2;
                run->iocsr_io.is_write = 1;
                break;
        case iocsrwrw_op:
                run->iocsr_io.len = 4;
                run->iocsr_io.is_write = 1;
                break;
        case iocsrwrd_op:
                run->iocsr_io.len = 8;
                run->iocsr_io.is_write = 1;
                break;
        default:
                ret = EMULATE_FAIL;
                break;
        }

        if (ret == EMULATE_DO_IOCSR) {
                if (run->iocsr_io.is_write) {
                        val = vcpu->arch.gprs[rd];
                        memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
                }
                vcpu->arch.io_gpr = rd;
        }

        return ret;
}

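/*
 * Called after userspace has completed an IOCSR read: copy the sign-extended
 * result from run->iocsr_io.data into the saved destination GPR.
 */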
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        enum emulation_result er = EMULATE_DONE;
        unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

        switch (run->iocsr_io.len) {
        case 1:
                *gpr = *(s8 *)run->iocsr_io.data;
                break;
        case 2:
                *gpr = *(s16 *)run->iocsr_io.data;
                break;
        case 4:
                *gpr = *(s32 *)run->iocsr_io.data;
                break;
        case 8:
                *gpr = *(s64 *)run->iocsr_io.data;
                break;
        default:
                kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
                                run->iocsr_io.len, vcpu->arch.badv);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}

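/*
 * Emulate the IDLE instruction: count the exit and block the vCPU if it is
 * not already runnable.
 */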
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.idle_exits;
        trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

        if (!kvm_arch_vcpu_runnable(vcpu))
                kvm_vcpu_halt(vcpu);

        return EMULATE_DONE;
}

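/*
 * Decode the instruction that raised the GSPR exception and dispatch to the
 * matching emulator (CPUCFG, CSR, cache, IDLE or IOCSR). The PC is advanced
 * first and rolled back if emulation fails.
 */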
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
        unsigned long curr_pc;
        larch_inst inst;
        enum emulation_result er = EMULATE_DONE;
        struct kvm_run *run = vcpu->run;

        /* Fetch the instruction */
        inst.word = vcpu->arch.badi;
        curr_pc = vcpu->arch.pc;
        update_pc(&vcpu->arch);

        trace_kvm_exit_gspr(vcpu, inst.word);
        er = EMULATE_FAIL;
        switch (((inst.word >> 24) & 0xff)) {
        case 0x0: /* CPUCFG GSPR */
                er = kvm_emu_cpucfg(vcpu, inst);
                break;
        case 0x4: /* CSR{RD,WR,XCHG} GSPR */
                er = kvm_handle_csr(vcpu, inst);
                break;
        case 0x6: /* Cache, Idle and IOCSR GSPR */
                switch (((inst.word >> 22) & 0x3ff)) {
                case 0x18: /* Cache GSPR */
                        er = EMULATE_DONE;
                        trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
                        break;
                case 0x19: /* Idle/IOCSR GSPR */
                        switch (((inst.word >> 15) & 0x1ffff)) {
                        case 0xc90: /* IOCSR GSPR */
                                er = kvm_emu_iocsr(inst, run, vcpu);
                                break;
                        case 0xc91: /* Idle GSPR */
                                er = kvm_emu_idle(vcpu);
                                break;
                        default:
                                er = EMULATE_FAIL;
                                break;
                        }
                        break;
                default:
                        er = EMULATE_FAIL;
                        break;
                }
                break;
        default:
                er = EMULATE_FAIL;
                break;
        }

        /* Roll back the PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL) {
                kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
                        curr_pc, __func__, inst.word);

                kvm_arch_vcpu_dump_regs(vcpu);
                vcpu->arch.pc = curr_pc;
        }

        return er;
}

/*
 * A GSPR exception is triggered by:
 * 1) Execution of the CPUCFG instruction;
 * 2) Execution of the CACOP/IDLE instructions;
 * 3) Access to unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
        int ret = RESUME_GUEST;
        enum emulation_result er = EMULATE_DONE;

        er = kvm_trap_handle_gspr(vcpu);

        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else if (er == EMULATE_DO_MMIO) {
                vcpu->run->exit_reason = KVM_EXIT_MMIO;
                ret = RESUME_HOST;
        } else if (er == EMULATE_DO_IOCSR) {
                vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
                ret = RESUME_HOST;
        } else {
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);
                ret = RESUME_GUEST;
        }

        return ret;
}

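/*
 * Decode a trapped load instruction and set up vcpu->run->mmio for userspace
 * to perform the access; vcpu->mmio_needed records whether the result must be
 * sign- or zero-extended on completion.
 */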
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
        int ret;
        unsigned int op8, opcode, rd;
        struct kvm_run *run = vcpu->run;

        run->mmio.phys_addr = vcpu->arch.badv;
        vcpu->mmio_needed = 2;  /* signed */
        op8 = (inst.word >> 24) & 0xff;
        ret = EMULATE_DO_MMIO;

        switch (op8) {
        case 0x24 ... 0x27:     /* ldptr.w/d process */
                rd = inst.reg2i14_format.rd;
                opcode = inst.reg2i14_format.opcode;

                switch (opcode) {
                case ldptrw_op:
                        run->mmio.len = 4;
                        break;
                case ldptrd_op:
                        run->mmio.len = 8;
                        break;
                default:
                        break;
                }
                break;
        case 0x28 ... 0x2e:     /* ld.b/h/w/d, ld.bu/hu/wu process */
                rd = inst.reg2i12_format.rd;
                opcode = inst.reg2i12_format.opcode;

                switch (opcode) {
                case ldb_op:
                        run->mmio.len = 1;
                        break;
                case ldbu_op:
                        vcpu->mmio_needed = 1;  /* unsigned */
                        run->mmio.len = 1;
                        break;
                case ldh_op:
                        run->mmio.len = 2;
                        break;
                case ldhu_op:
                        vcpu->mmio_needed = 1;  /* unsigned */
                        run->mmio.len = 2;
                        break;
                case ldw_op:
                        run->mmio.len = 4;
                        break;
                case ldwu_op:
                        vcpu->mmio_needed = 1;  /* unsigned */
                        run->mmio.len = 4;
                        break;
                case ldd_op:
                        run->mmio.len = 8;
                        break;
                default:
                        ret = EMULATE_FAIL;
                        break;
                }
                break;
        case 0x38:      /* ldx.b/h/w/d, ldx.bu/hu/wu process */
                rd = inst.reg3_format.rd;
                opcode = inst.reg3_format.opcode;

                switch (opcode) {
                case ldxb_op:
                        run->mmio.len = 1;
                        break;
                case ldxbu_op:
                        run->mmio.len = 1;
                        vcpu->mmio_needed = 1;  /* unsigned */
                        break;
                case ldxh_op:
                        run->mmio.len = 2;
                        break;
                case ldxhu_op:
                        run->mmio.len = 2;
                        vcpu->mmio_needed = 1;  /* unsigned */
                        break;
                case ldxw_op:
                        run->mmio.len = 4;
                        break;
                case ldxwu_op:
                        run->mmio.len = 4;
                        vcpu->mmio_needed = 1;  /* unsigned */
                        break;
                case ldxd_op:
                        run->mmio.len = 8;
                        break;
                default:
                        ret = EMULATE_FAIL;
                        break;
                }
                break;
        default:
                ret = EMULATE_FAIL;
        }

        if (ret == EMULATE_DO_MMIO) {
                /* Set for kvm_complete_mmio_read() use */
                vcpu->arch.io_gpr = rd;
                run->mmio.is_write = 0;
                vcpu->mmio_is_write = 0;
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len,
                                run->mmio.phys_addr, NULL);
        } else {
                kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
                        inst.word, vcpu->arch.pc, vcpu->arch.badv);
                kvm_arch_vcpu_dump_regs(vcpu);
                vcpu->mmio_needed = 0;
        }

        return ret;
}

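/*
 * Called after userspace has completed an MMIO load: advance the PC and copy
 * the result into the saved destination GPR, sign- or zero-extending
 * according to vcpu->mmio_needed.
 */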
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        enum emulation_result er = EMULATE_DONE;
        unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

        /* Update with new PC */
        update_pc(&vcpu->arch);
        switch (run->mmio.len) {
        case 1:
                if (vcpu->mmio_needed == 2)
                        *gpr = *(s8 *)run->mmio.data;
                else
                        *gpr = *(u8 *)run->mmio.data;
                break;
        case 2:
                if (vcpu->mmio_needed == 2)
                        *gpr = *(s16 *)run->mmio.data;
                else
                        *gpr = *(u16 *)run->mmio.data;
                break;
        case 4:
                if (vcpu->mmio_needed == 2)
                        *gpr = *(s32 *)run->mmio.data;
                else
                        *gpr = *(u32 *)run->mmio.data;
                break;
        case 8:
                *gpr = *(s64 *)run->mmio.data;
                break;
        default:
                kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
                                run->mmio.len, vcpu->arch.badv);
                er = EMULATE_FAIL;
                break;
        }

        trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
                        run->mmio.phys_addr, run->mmio.data);

        return er;
}

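/*
 * Decode a trapped store instruction, copy the source GPR value into
 * vcpu->run->mmio.data and set up the request for userspace to perform
 * the access.
 */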
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
        int ret;
        unsigned int rd, op8, opcode;
        unsigned long curr_pc, rd_val = 0;
        struct kvm_run *run = vcpu->run;
        void *data = run->mmio.data;

        /*
         * Update the PC, but hold onto the current PC so we can roll it
         * back if emulation fails.
         */
        curr_pc = vcpu->arch.pc;
        update_pc(&vcpu->arch);

        op8 = (inst.word >> 24) & 0xff;
        run->mmio.phys_addr = vcpu->arch.badv;
        ret = EMULATE_DO_MMIO;
        switch (op8) {
        case 0x24 ... 0x27:     /* stptr.w/d process */
                rd = inst.reg2i14_format.rd;
                opcode = inst.reg2i14_format.opcode;

                switch (opcode) {
                case stptrw_op:
                        run->mmio.len = 4;
                        *(unsigned int *)data = vcpu->arch.gprs[rd];
                        break;
                case stptrd_op:
                        run->mmio.len = 8;
                        *(unsigned long *)data = vcpu->arch.gprs[rd];
                        break;
                default:
                        ret = EMULATE_FAIL;
                        break;
                }
                break;
        case 0x28 ... 0x2e:     /* st.b/h/w/d process */
                rd = inst.reg2i12_format.rd;
                opcode = inst.reg2i12_format.opcode;
                rd_val = vcpu->arch.gprs[rd];

                switch (opcode) {
                case stb_op:
                        run->mmio.len = 1;
                        *(unsigned char *)data = rd_val;
                        break;
                case sth_op:
                        run->mmio.len = 2;
                        *(unsigned short *)data = rd_val;
                        break;
                case stw_op:
                        run->mmio.len = 4;
                        *(unsigned int *)data = rd_val;
                        break;
                case std_op:
                        run->mmio.len = 8;
                        *(unsigned long *)data = rd_val;
                        break;
                default:
                        ret = EMULATE_FAIL;
                        break;
                }
                break;
        case 0x38:      /* stx.b/h/w/d process */
                rd = inst.reg3_format.rd;
                opcode = inst.reg3_format.opcode;

                switch (opcode) {
                case stxb_op:
                        run->mmio.len = 1;
                        *(unsigned char *)data = vcpu->arch.gprs[rd];
                        break;
                case stxh_op:
                        run->mmio.len = 2;
                        *(unsigned short *)data = vcpu->arch.gprs[rd];
                        break;
                case stxw_op:
                        run->mmio.len = 4;
                        *(unsigned int *)data = vcpu->arch.gprs[rd];
                        break;
                case stxd_op:
                        run->mmio.len = 8;
                        *(unsigned long *)data = vcpu->arch.gprs[rd];
                        break;
                default:
                        ret = EMULATE_FAIL;
                        break;
                }
                break;
        default:
                ret = EMULATE_FAIL;
        }

        if (ret == EMULATE_DO_MMIO) {
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len,
                                run->mmio.phys_addr, data);
        } else {
                /* Roll back the PC if emulation was unsuccessful */
                vcpu->arch.pc = curr_pc;
                kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
                        inst.word, vcpu->arch.pc, vcpu->arch.badv);
                kvm_arch_vcpu_dump_regs(vcpu);
        }

        return ret;
}

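/*
 * Common handler for guest read/write faults: try to resolve the fault as a
 * normal memory access first, and fall back to MMIO emulation if the address
 * is not backed by guest memory.
 */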
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
        int ret;
        larch_inst inst;
        enum emulation_result er = EMULATE_DONE;
        struct kvm_run *run = vcpu->run;
        unsigned long badv = vcpu->arch.badv;

        ret = kvm_handle_mm_fault(vcpu, badv, write);
        if (ret) {
                /* Treat as MMIO */
                inst.word = vcpu->arch.badi;
                if (write) {
                        er = kvm_emu_mmio_write(vcpu, inst);
                } else {
                        /* A code fetch fault doesn't count as an MMIO */
                        if (kvm_is_ifetch_fault(&vcpu->arch)) {
                                kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
                                return RESUME_GUEST;
                        }

                        er = kvm_emu_mmio_read(vcpu, inst);
                }
        }

        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else if (er == EMULATE_DO_MMIO) {
                run->exit_reason = KVM_EXIT_MMIO;
                ret = RESUME_HOST;
        } else {
                kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
                ret = RESUME_GUEST;
        }

        return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
        return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
        return kvm_handle_rdwr_fault(vcpu, true);
}

/**
 * kvm_handle_fpu_disabled() - Guest used the FPU while it is disabled in the host.
 * @vcpu:       Virtual CPU context.
 *
 * Handle the case where the guest attempts to use the FPU when it has not
 * been enabled by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        if (!kvm_guest_has_fpu(&vcpu->arch)) {
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);
                return RESUME_GUEST;
        }

        /*
         * If the guest FPU is not present, the FPU operation should have been
         * treated as a reserved instruction!
         * If the FPU is already in use, we shouldn't get this exit at all.
         */
        if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
                kvm_err("%s internal error\n", __func__);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        kvm_own_fpu(vcpu);

        return RESUME_GUEST;
}

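/*
 * Handle the KVM_HCALL_FUNC_NOTIFY hypercall: the guest registers (or
 * disables) the physical address used for steal-time accounting.
 */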
static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
        unsigned long id, data;

        id   = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
        data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
        switch (id) {
        case KVM_FEATURE_STEAL_TIME:
                if (!kvm_pvtime_supported())
                        return KVM_HCALL_INVALID_CODE;

                if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
                        return KVM_HCALL_INVALID_PARAMETER;

                vcpu->arch.st.guest_addr = data;
                if (!(data & KVM_STEAL_PHYS_VALID))
                        break;

                vcpu->arch.st.last_steal = current->sched_info.run_delay;
                kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
                break;
        default:
                break;
        }

        return 0;
}

/**
 * kvm_handle_lsx_disabled() - Guest used LSX while it is disabled in the root context.
 * @vcpu:      Virtual CPU context.
 *
 * Handle the case where the guest attempts to use LSX when it is disabled in
 * the root context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
        if (kvm_own_lsx(vcpu))
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);

        return RESUME_GUEST;
}

/**
 * kvm_handle_lasx_disabled() - Guest used LASX while it is disabled in the root context.
 * @vcpu:       Virtual CPU context.
 *
 * Handle the case where the guest attempts to use LASX when it is disabled in
 * the root context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
        if (kvm_own_lasx(vcpu))
                kvm_queue_exception(vcpu, EXCCODE_INE, 0);

        return RESUME_GUEST;
}

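/*
 * Handle the paravirt IPI hypercall: a1/a2 carry a bitmap of destination
 * vCPU ids (two longs wide), offset by the minimum id passed in a3. Inject
 * SWI0 into each destination vCPU and kick it.
 */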
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
        unsigned int min, cpu, i;
        unsigned long ipi_bitmap;
        struct kvm_vcpu *dest;

        min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
        for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
                ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
                if (!ipi_bitmap)
                        continue;

                cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
                while (cpu < BITS_PER_LONG) {
                        dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
                        cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
                        if (!dest)
                                continue;

                        /* Send SWI0 to dest vcpu to emulate IPI interrupt */
                        kvm_queue_irq(dest, INT_SWI0);
                        kvm_vcpu_kick(dest);
                }
        }

        return 0;
}

/*
 * Hypercall emulation always returns to the guest; the caller should check
 * the return value.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
        unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
        long ret;

        switch (func) {
        case KVM_HCALL_FUNC_IPI:
                kvm_send_pv_ipi(vcpu);
                ret = KVM_HCALL_SUCCESS;
                break;
        case KVM_HCALL_FUNC_NOTIFY:
                ret = kvm_save_notify(vcpu);
                break;
        default:
                ret = KVM_HCALL_INVALID_CODE;
                break;
        }

        kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}

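/*
 * Dispatch a trapped hypercall instruction by its immediate code: service
 * hypercalls are handled in the kernel, software-debug hypercalls exit to
 * userspace when SW_BP is enabled, and anything else gets an error code.
 */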
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
{
        int ret;
        larch_inst inst;
        unsigned int code;

        inst.word = vcpu->arch.badi;
        code = inst.reg0i15_format.immediate;
        ret = RESUME_GUEST;

        switch (code) {
        case KVM_HCALL_SERVICE:
                vcpu->stat.hypercall_exits++;
                kvm_handle_service(vcpu);
                break;
        case KVM_HCALL_SWDBG:
                /* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
                if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
                        vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                        ret = RESUME_HOST;
                        break;
                }
                fallthrough;
        default:
                /* Treat it as a noop instruction; only set the return value */
                kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
                break;
        }

        if (ret == RESUME_GUEST)
                update_pc(&vcpu->arch);

        return ret;
}

/*
 * LoongArch KVM fallback handling for unimplemented guest exit codes
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
        unsigned int ecode, inst;
        unsigned long estat, badv;

        /* Fetch the instruction */
        inst = vcpu->arch.badi;
        badv = vcpu->arch.badv;
        estat = vcpu->arch.host_estat;
        ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
        kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
                        ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
        kvm_arch_vcpu_dump_regs(vcpu);
        kvm_queue_exception(vcpu, EXCCODE_INE, 0);

        return RESUME_GUEST;
}

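/*
 * Exit dispatch table, indexed by the exception code taken from CSR.ESTAT;
 * any code without a dedicated handler falls back to kvm_fault_ni().
 */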
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
        [0 ... EXCCODE_INT_START - 1]   = kvm_fault_ni,
        [EXCCODE_TLBI]                  = kvm_handle_read_fault,
        [EXCCODE_TLBL]                  = kvm_handle_read_fault,
        [EXCCODE_TLBS]                  = kvm_handle_write_fault,
        [EXCCODE_TLBM]                  = kvm_handle_write_fault,
        [EXCCODE_FPDIS]                 = kvm_handle_fpu_disabled,
        [EXCCODE_LSXDIS]                = kvm_handle_lsx_disabled,
        [EXCCODE_LASXDIS]               = kvm_handle_lasx_disabled,
        [EXCCODE_GSPR]                  = kvm_handle_gspr,
        [EXCCODE_HVC]                   = kvm_handle_hypercall,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
        return kvm_fault_tables[fault](vcpu);
}
