TOMOYO Linux Cross Reference
Linux/arch/riscv/kvm/vcpu_sbi_replace.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>
#include <asm/kvm_vcpu_sbi.h>

static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                    struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        u64 next_cycle;

        if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
                retdata->err_val = SBI_ERR_INVALID_PARAM;
                return 0;
        }

        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
#if __riscv_xlen == 32
        next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
        next_cycle = (u64)cp->a0;
#endif
        kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);

        return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
        .extid_start = SBI_EXT_TIME,
        .extid_end = SBI_EXT_TIME,
        .handler = kvm_sbi_ext_time_handler,
};
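
The handler above is reached through a standard SBI call: the guest places the extension ID (EID) in a7, the function ID (FID) in a6, and arguments in a0-a5; the error/value pair comes back in a0/a1. As a hedged illustration only (nothing below is part of this file, and guest_sbi_ecall() is a made-up name; a real guest would use its own wrapper, such as Linux's sbi_ecall()), a minimal guest-side SET_TIMER call (EID 0x54494D45 "TIME", FID 0) might look like this:

/*
 * Hypothetical guest-side sketch, NOT part of vcpu_sbi_replace.c:
 * a minimal ecall wrapper following the SBI calling convention
 * (EID in a7, FID in a6, arguments in a0-a5, error/value in a0/a1).
 */
struct sbiret {
        long error;
        long value;
};

static struct sbiret guest_sbi_ecall(unsigned long eid, unsigned long fid,
                                     unsigned long arg0, unsigned long arg1,
                                     unsigned long arg2, unsigned long arg3,
                                     unsigned long arg4)
{
        register unsigned long a0 asm("a0") = arg0;
        register unsigned long a1 asm("a1") = arg1;
        register unsigned long a2 asm("a2") = arg2;
        register unsigned long a3 asm("a3") = arg3;
        register unsigned long a4 asm("a4") = arg4;
        register unsigned long a6 asm("a6") = fid;
        register unsigned long a7 asm("a7") = eid;

        asm volatile("ecall"
                     : "+r"(a0), "+r"(a1)
                     : "r"(a2), "r"(a3), "r"(a4), "r"(a6), "r"(a7)
                     : "memory");

        return (struct sbiret){ .error = a0, .value = a1 };
}

/*
 * SET_TIMER: on RV64 the 64-bit deadline fits in a0; on RV32 the low
 * half goes in a0 and the high half in a1, which is exactly what the
 * __riscv_xlen == 32 branch of the handler above reassembles.
 */
static void guest_sbi_set_timer(unsigned long long next_cycle)
{
        guest_sbi_ecall(0x54494D45 /* "TIME" */, 0 /* SET_TIMER */,
                        (unsigned long)next_cycle,
                        (unsigned long)(next_cycle >> 32), 0, 0, 0);
}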

static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                   struct kvm_vcpu_sbi_return *retdata)
{
        int ret = 0;
        unsigned long i;
        struct kvm_vcpu *tmp;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        unsigned long hmask = cp->a0;
        unsigned long hbase = cp->a1;

        if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
                retdata->err_val = SBI_ERR_INVALID_PARAM;
                return 0;
        }

        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT);
        kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
                if (hbase != -1UL) {
                        if (tmp->vcpu_id < hbase)
                                continue;
                        if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
                                continue;
                }
                ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
                if (ret < 0)
                        break;
                kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
        }

        return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
        .extid_start = SBI_EXT_IPI,
        .extid_end = SBI_EXT_IPI,
        .handler = kvm_sbi_ext_ipi_handler,
};
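
The loop above implements the SBI hart-mask convention: hmask (a0) selects targets as bit offsets from hbase (a1), and hbase == -1UL broadcasts to every VCPU, skipping the mask check entirely. Targeting harts 4 and 6, for example, means hbase = 4 and hmask = 0b101. A sketch reusing the hypothetical guest_sbi_ecall() wrapper from above (IPI EID 0x735049 "sPI", FID 0):

/*
 * Hypothetical guest-side sketch: send an IPI to harts 4 and 6, i.e.
 * bits 0 and 2 of hmask relative to hbase = 4 (EID 0x735049 "sPI",
 * FID 0 = SEND_IPI). Passing hbase = -1UL instead broadcasts.
 */
static void guest_sbi_ipi_example(void)
{
        unsigned long hmask = (1UL << 0) | (1UL << 2);  /* harts 4 and 6 */
        unsigned long hbase = 4;

        guest_sbi_ecall(0x735049 /* "sPI" */, 0, hmask, hbase, 0, 0, 0);
}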

static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                      struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        unsigned long hmask = cp->a0;
        unsigned long hbase = cp->a1;
        unsigned long funcid = cp->a6;

        switch (funcid) {
        case SBI_EXT_RFENCE_REMOTE_FENCE_I:
                kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
                if (cp->a2 == 0 && cp->a3 == 0)
                        kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
                else
                        kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
                                                  cp->a2, cp->a3, PAGE_SHIFT);
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
                if (cp->a2 == 0 && cp->a3 == 0)
                        kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
                                                       hbase, hmask, cp->a4);
                else
                        kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
                                                       hbase, hmask,
                                                       cp->a2, cp->a3,
                                                       PAGE_SHIFT, cp->a4);
                kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
        case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
        case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
        case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
                /*
                 * Until nested virtualization is implemented, the
                 * SBI HFENCE calls should be treated as NOPs
                 */
                break;
        default:
                retdata->err_val = SBI_ERR_NOT_SUPPORTED;
        }

        return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
        .extid_start = SBI_EXT_RFENCE,
        .extid_end = SBI_EXT_RFENCE,
        .handler = kvm_sbi_ext_rfence_handler,
};
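
In the SFENCE_VMA cases above, a2 carries the start address, a3 the size, and a4 (ASID variant only) the ASID; the handler treats a start and size of 0/0 as a full-address-space flush, and serves the guest's SFENCE requests with HFENCE.VVMA operations on the host side. A guest-side range flush under the same sketch assumptions as before, via the hypothetical guest_sbi_ecall() wrapper (RFENCE EID 0x52464E43 "RFNC", FID 1 = REMOTE_SFENCE_VMA):

/*
 * Hypothetical guest-side sketch: request a remote SFENCE.VMA over one
 * 4 KiB page on the harts selected by hmask/hbase (EID 0x52464E43
 * "RFNC", FID 1). start = 0 and size = 0 would instead take the
 * full-flush path in the handler above.
 */
static void guest_sbi_sfence_vma_page(unsigned long hmask,
                                      unsigned long hbase, unsigned long va)
{
        guest_sbi_ecall(0x52464E43 /* "RFNC" */, 1, hmask, hbase, va, 4096, 0);
}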

static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
                                    struct kvm_run *run,
                                    struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        unsigned long funcid = cp->a6;
        u32 reason = cp->a1;
        u32 type = cp->a0;

        switch (funcid) {
        case SBI_EXT_SRST_RESET:
                switch (type) {
                case SBI_SRST_RESET_TYPE_SHUTDOWN:
                        kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
                                                KVM_SYSTEM_EVENT_SHUTDOWN,
                                                reason);
                        retdata->uexit = true;
                        break;
                case SBI_SRST_RESET_TYPE_COLD_REBOOT:
                case SBI_SRST_RESET_TYPE_WARM_REBOOT:
                        kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
                                                KVM_SYSTEM_EVENT_RESET,
                                                reason);
                        retdata->uexit = true;
                        break;
                default:
                        retdata->err_val = SBI_ERR_NOT_SUPPORTED;
                }
                break;
        default:
                retdata->err_val = SBI_ERR_NOT_SUPPORTED;
        }

        return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
        .extid_start = SBI_EXT_SRST,
        .extid_end = SBI_EXT_SRST,
        .handler = kvm_sbi_ext_srst_handler,
};
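
The reset type arrives in a0 and the reason in a1; rather than resetting anything itself, KVM converts supported requests into KVM_SYSTEM_EVENT_SHUTDOWN or KVM_SYSTEM_EVENT_RESET exits (note retdata->uexit) so that userspace, e.g. QEMU, performs the actual shutdown or reboot. A guest-side shutdown request under the same sketch assumptions (SRST EID 0x53525354 "SRST", FID 0 = RESET):

/*
 * Hypothetical guest-side sketch: request a system shutdown
 * (EID 0x53525354 "SRST", FID 0, type 0 = SHUTDOWN, reason 0 = NONE).
 * Types 1 and 2 (cold/warm reboot) land in the KVM_SYSTEM_EVENT_RESET
 * branch of the handler above.
 */
static void guest_sbi_shutdown(void)
{
        guest_sbi_ecall(0x53525354 /* "SRST" */, 0, 0, 0, 0, 0, 0);
}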

static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu,
                                    struct kvm_run *run,
                                    struct kvm_vcpu_sbi_return *retdata)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        unsigned long funcid = cp->a6;

        switch (funcid) {
        case SBI_EXT_DBCN_CONSOLE_WRITE:
        case SBI_EXT_DBCN_CONSOLE_READ:
        case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
                /*
                 * The SBI debug console functions are unconditionally
                 * forwarded to the userspace.
                 */
                kvm_riscv_vcpu_sbi_forward(vcpu, run);
                retdata->uexit = true;
                break;
        default:
                retdata->err_val = SBI_ERR_NOT_SUPPORTED;
        }

        return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = {
        .extid_start = SBI_EXT_DBCN,
        .extid_end = SBI_EXT_DBCN,
        .default_disabled = true,
        .handler = kvm_sbi_ext_dbcn_handler,
};
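
Because every debug-console call exits to userspace, and because the extension is marked default_disabled, the VMM must both enable the extension for the guest (through KVM's ONE_REG interface for SBI extensions) and implement the console itself. Emitting a single character from the guest, under the same sketch assumptions as the earlier examples (DBCN EID 0x4442434E "DBCN", FID 2 = CONSOLE_WRITE_BYTE):

/*
 * Hypothetical guest-side sketch: write one byte to the SBI debug
 * console (EID 0x4442434E "DBCN", FID 2 = CONSOLE_WRITE_BYTE). The
 * byte goes in a0; CONSOLE_WRITE/READ (FIDs 0/1) instead take a
 * length and a buffer address in a0-a2.
 */
static void guest_sbi_debug_putc(char c)
{
        guest_sbi_ecall(0x4442434E /* "DBCN" */, 2, (unsigned char)c,
                        0, 0, 0, 0);
}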
