Linux/arch/riscv/include/asm/kvm_host.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#ifndef __RISCV_KVM_HOST_H__
#define __RISCV_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia.h>
#include <asm/ptrace.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>

#define KVM_MAX_VCPUS                   1024

#define KVM_HALT_POLL_NS_DEFAULT        500000

#define KVM_VCPU_MAX_FEATURES           0

#define KVM_IRQCHIP_NUM_PINS            1024

#define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET              KVM_ARCH_REQ(1)
#define KVM_REQ_UPDATE_HGATP            KVM_ARCH_REQ(2)
#define KVM_REQ_FENCE_I                 \
        KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE_GVMA_VMID_ALL    KVM_REQ_TLB_FLUSH
#define KVM_REQ_HFENCE_VVMA_ALL         \
        KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE                  \
        KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE            KVM_ARCH_REQ(6)
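
/*
 * Illustrative usage of the request constants above (via the generic KVM
 * request API, which is not declared in this header): a sender raises a
 * request on a target VCPU and kicks it, and the VCPU run loop consumes
 * the request, e.g.
 *
 *     kvm_make_request(KVM_REQ_FENCE_I, vcpu);
 *     kvm_vcpu_kick(vcpu);
 *     ...
 *     if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
 *             kvm_riscv_fence_i_process(vcpu);
 *
 * KVM_REQUEST_WAIT makes cross-VCPU senders wait for acknowledgement,
 * and KVM_REQUEST_NO_WAKEUP avoids waking a blocked VCPU just for the
 * fence.
 */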

#define KVM_HEDELEG_DEFAULT             (BIT(EXC_INST_MISALIGNED) | \
                                         BIT(EXC_BREAKPOINT)      | \
                                         BIT(EXC_SYSCALL)         | \
                                         BIT(EXC_INST_PAGE_FAULT) | \
                                         BIT(EXC_LOAD_PAGE_FAULT) | \
                                         BIT(EXC_STORE_PAGE_FAULT))

#define KVM_HIDELEG_DEFAULT             (BIT(IRQ_VS_SOFT)  | \
                                         BIT(IRQ_VS_TIMER) | \
                                         BIT(IRQ_VS_EXT))
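
/*
 * These defaults populate the hedeleg/hideleg hypervisor CSRs, so the
 * exceptions and VS-level interrupts listed above are delegated directly
 * to VS-mode instead of trapping to the host hypervisor.
 */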

enum kvm_riscv_hfence_type {
        KVM_RISCV_HFENCE_UNKNOWN = 0,
        KVM_RISCV_HFENCE_GVMA_VMID_GPA,
        KVM_RISCV_HFENCE_VVMA_ASID_GVA,
        KVM_RISCV_HFENCE_VVMA_ASID_ALL,
        KVM_RISCV_HFENCE_VVMA_GVA,
};

struct kvm_riscv_hfence {
        enum kvm_riscv_hfence_type type;
        unsigned long asid;
        unsigned long order;
        gpa_t addr;
        gpa_t size;
};

#define KVM_RISCV_VCPU_MAX_HFENCE       64
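
/*
 * Pending HFENCE requests are queued per VCPU in hfence_queue[] (see
 * struct kvm_vcpu_arch below), which is used as a fixed-size circular
 * buffer: hfence_head and hfence_tail advance modulo
 * KVM_RISCV_VCPU_MAX_HFENCE under hfence_lock.
 */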

struct kvm_vm_stat {
        struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
        struct kvm_vcpu_stat_generic generic;
        u64 ecall_exit_stat;
        u64 wfi_exit_stat;
        u64 wrs_exit_stat;
        u64 mmio_exit_user;
        u64 mmio_exit_kernel;
        u64 csr_exit_user;
        u64 csr_exit_kernel;
        u64 signal_exits;
        u64 exits;
};

struct kvm_arch_memory_slot {
};

struct kvm_vmid {
        /*
         * Writes to vmid_version and vmid happen with vmid_lock held
         * whereas reads happen without any lock held.
         */
        unsigned long vmid_version;
        unsigned long vmid;
};
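
/*
 * A sketch of the scheme described in the comment above: a lockless
 * reader may observe a stale vmid_version; this is harmless because the
 * version is re-checked, and the VMID reassigned if needed, on every
 * VCPU entry before the stale value can be used for guest execution.
 */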

struct kvm_arch {
        /* G-stage vmid */
        struct kvm_vmid vmid;

        /* G-stage page table */
        pgd_t *pgd;
        phys_addr_t pgd_phys;

        /* Guest Timer */
        struct kvm_guest_timer timer;

        /* AIA Guest/VM context */
        struct kvm_aia aia;
};

struct kvm_cpu_trap {
        unsigned long sepc;
        unsigned long scause;
        unsigned long stval;
        unsigned long htval;
        unsigned long htinst;
};
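
/*
 * struct kvm_cpu_trap mirrors the trap CSRs captured on guest exit:
 * sepc/scause/stval from the supervisor trap CSRs, plus htval/htinst,
 * which carry the faulting guest physical address and the transformed
 * encoding of the trapping instruction for guest faults.
 */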

struct kvm_cpu_context {
        unsigned long zero;
        unsigned long ra;
        unsigned long sp;
        unsigned long gp;
        unsigned long tp;
        unsigned long t0;
        unsigned long t1;
        unsigned long t2;
        unsigned long s0;
        unsigned long s1;
        unsigned long a0;
        unsigned long a1;
        unsigned long a2;
        unsigned long a3;
        unsigned long a4;
        unsigned long a5;
        unsigned long a6;
        unsigned long a7;
        unsigned long s2;
        unsigned long s3;
        unsigned long s4;
        unsigned long s5;
        unsigned long s6;
        unsigned long s7;
        unsigned long s8;
        unsigned long s9;
        unsigned long s10;
        unsigned long s11;
        unsigned long t3;
        unsigned long t4;
        unsigned long t5;
        unsigned long t6;
        unsigned long sepc;
        unsigned long sstatus;
        unsigned long hstatus;
        union __riscv_fp_state fp;
        struct __riscv_v_ext_state vector;
};

struct kvm_vcpu_csr {
        unsigned long vsstatus;
        unsigned long vsie;
        unsigned long vstvec;
        unsigned long vsscratch;
        unsigned long vsepc;
        unsigned long vscause;
        unsigned long vstval;
        unsigned long hvip;
        unsigned long vsatp;
        unsigned long scounteren;
        unsigned long senvcfg;
};

struct kvm_vcpu_config {
        u64 henvcfg;
        u64 hstateen0;
        unsigned long hedeleg;
};

struct kvm_vcpu_smstateen_csr {
        unsigned long sstateen0;
};

struct kvm_vcpu_arch {
        /* VCPU ran at least once */
        bool ran_atleast_once;

        /* Last Host CPU on which Guest VCPU exited */
        int last_exit_cpu;

        /* ISA feature bits (similar to MISA) */
        DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

        /* Vendor, Arch, and Implementation details */
        unsigned long mvendorid;
        unsigned long marchid;
        unsigned long mimpid;

        /* SSCRATCH, STVEC, SCOUNTEREN, SENVCFG, and SSTATEEN0 of Host */
        unsigned long host_sscratch;
        unsigned long host_stvec;
        unsigned long host_scounteren;
        unsigned long host_senvcfg;
        unsigned long host_sstateen0;

        /* CPU context of Host */
        struct kvm_cpu_context host_context;

        /* CPU context of Guest VCPU */
        struct kvm_cpu_context guest_context;

        /* CPU CSR context of Guest VCPU */
        struct kvm_vcpu_csr guest_csr;

        /* CPU Smstateen CSR context of Guest VCPU */
        struct kvm_vcpu_smstateen_csr smstateen_csr;

        /* CPU context upon Guest VCPU reset */
        struct kvm_cpu_context guest_reset_context;
        spinlock_t reset_cntx_lock;

        /* CPU CSR context upon Guest VCPU reset */
        struct kvm_vcpu_csr guest_reset_csr;

        /*
         * VCPU interrupts
         *
         * We have a lockless approach for tracking pending VCPU interrupts
         * implemented using atomic bitops. The irqs_pending bitmap represents
         * pending interrupts, whereas irqs_pending_mask represents the bits
         * that changed in irqs_pending. Our approach is modeled on the
         * multiple-producer, single-consumer problem, where the consumer is
         * the VCPU itself.
         */
#define KVM_RISCV_VCPU_NR_IRQS  64
        DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
        DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
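
        /*
         * Concretely (a sketch of the protocol described above): producers
         * set or clear an interrupt bit in irqs_pending, mark the same bit
         * in irqs_pending_mask, and kick the VCPU; the VCPU, as the single
         * consumer, clears bits from irqs_pending_mask and folds the
         * corresponding irqs_pending state into the guest's hvip CSR
         * before entering the guest.
         */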

        /* VCPU Timer */
        struct kvm_vcpu_timer timer;

        /* HFENCE request queue */
        spinlock_t hfence_lock;
        unsigned long hfence_head;
        unsigned long hfence_tail;
        struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];

        /* MMIO instruction details */
        struct kvm_mmio_decode mmio_decode;

        /* CSR instruction details */
        struct kvm_csr_decode csr_decode;

        /* SBI context */
        struct kvm_vcpu_sbi_context sbi_context;

        /* AIA VCPU context */
        struct kvm_vcpu_aia aia_context;

        /* Cache pages needed to program page tables with spinlock held */
        struct kvm_mmu_memory_cache mmu_page_cache;

        /* VCPU power state */
        struct kvm_mp_state mp_state;
        spinlock_t mp_state_lock;

        /* Don't run the VCPU (blocked) */
        bool pause;

        /* Performance monitoring context */
        struct kvm_pmu pmu_context;

        /* 'static' configurations which are set only once */
        struct kvm_vcpu_config cfg;

        /* SBI steal-time accounting */
        struct {
                gpa_t shmem;
                u64 last_steal;
        } sta;
};
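
/*
 * sta.shmem above holds the guest-physical address of the steal-time
 * shared memory area that the guest registers through the SBI STA
 * extension; kvm_riscv_vcpu_record_steal_time() (declared below)
 * publishes the accumulated steal counter into that area.
 */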

static inline void kvm_arch_sync_events(struct kvm *kvm) {}

#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER          12
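
/*
 * The "order" arguments of the fence helpers below give the log2 of the
 * page-size granule backing the flushed range; the minimum order of 12
 * corresponds to the 4 KiB base page size.
 */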

void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
                                          gpa_t gpa, gpa_t gpsz,
                                          unsigned long order);
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
                                     unsigned long order);
void kvm_riscv_local_hfence_gvma_all(void);
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
                                          unsigned long asid,
                                          unsigned long gva,
                                          unsigned long gvsz,
                                          unsigned long order);
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
                                          unsigned long asid);
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
                                     unsigned long gva, unsigned long gvsz,
                                     unsigned long order);
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);

void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);

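/*
 * For the VM-wide operations below, hbase/hmask select the target VCPUs
 * with SBI-style hart-mask semantics: bit i of hmask addresses the VCPU
 * with id hbase + i, and an hbase of -1UL (as used by in-kernel callers)
 * addresses all VCPUs.
 */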
void kvm_riscv_fence_i(struct kvm *kvm,
                       unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
                                    unsigned long order);
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask);
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
                                    unsigned long order, unsigned long asid);
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long asid);
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
                               unsigned long order);
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask);

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
                             phys_addr_t hpa, unsigned long size,
                             bool writable, bool in_atomic);
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
                              unsigned long size);
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                         struct kvm_memory_slot *memslot,
                         gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void __init kvm_riscv_gstage_mode_detect(void);
unsigned long __init kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);

void __kvm_riscv_unpriv_trap(void);

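/*
 * kvm_riscv_vcpu_unpriv_read() accesses guest memory with the hypervisor
 * virtual-machine load instructions (HLV/HLVX; HLVX is used when
 * read_insn is true so that execute-only guest mappings can be fetched).
 * A fault taken during the access is reported back through *trap.
 */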
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
                                         bool read_insn,
                                         unsigned long guest_addr,
                                         struct kvm_cpu_trap *trap);
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
                                  struct kvm_cpu_trap *trap);
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        struct kvm_cpu_trap *trap);

void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);

void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
                                    u64 __user *uindices);
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
                           const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
                           const struct kvm_one_reg *reg);

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);

void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);

#endif /* __RISCV_KVM_HOST_H__ */
