TOMOYO Linux Cross Reference
Linux/arch/riscv/kvm/vcpu_sbi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

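/*
 * Stub entries for extensions that can be compiled out: an impossible
 * extid range and a NULL handler keep the sbi_ext[] table below well
 * formed while leaving the extension unreachable.
 */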
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
        enum KVM_RISCV_SBI_EXT_ID ext_idx;
        const struct kvm_vcpu_sbi_extension *ext_ptr;
};

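/*
 * Table mapping each user-visible (ONE_REG) SBI extension ID to its
 * implementation.  The base extension is listed with ext_idx ==
 * KVM_RISCV_SBI_EXT_MAX, which keeps it outside the per-vCPU enable
 * state and hence impossible to disable.
 */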
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
        {
                .ext_idx = KVM_RISCV_SBI_EXT_V01,
                .ext_ptr = &vcpu_sbi_ext_v01,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
                .ext_ptr = &vcpu_sbi_ext_base,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_TIME,
                .ext_ptr = &vcpu_sbi_ext_time,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_IPI,
                .ext_ptr = &vcpu_sbi_ext_ipi,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
                .ext_ptr = &vcpu_sbi_ext_rfence,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_SRST,
                .ext_ptr = &vcpu_sbi_ext_srst,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_HSM,
                .ext_ptr = &vcpu_sbi_ext_hsm,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_PMU,
                .ext_ptr = &vcpu_sbi_ext_pmu,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_DBCN,
                .ext_ptr = &vcpu_sbi_ext_dbcn,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_STA,
                .ext_ptr = &vcpu_sbi_ext_sta,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
                .ext_ptr = &vcpu_sbi_ext_experimental,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
                .ext_ptr = &vcpu_sbi_ext_vendor,
        },
};

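/*
 * Find the sbi_ext[] entry for a user-visible extension ID; returns
 * NULL when the ID is out of range or has no entry in the table.
 */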
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;

        if (idx >= KVM_RISCV_SBI_EXT_MAX)
                return NULL;

        for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == idx) {
                        sext = &sbi_ext[i];
                        break;
                }
        }

        return sext;
}

bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

        return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}

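/*
 * Forward an SBI call to userspace via KVM_EXIT_RISCV_SBI.  ret[] is
 * pre-seeded with the current a0/a1, so a userspace that leaves them
 * untouched preserves the guest's registers on return.
 */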
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = cp->a0;
        run->riscv_sbi.ret[1] = cp->a1;
}

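/*
 * Stop all vCPUs of the VM and report a system event (e.g. shutdown or
 * reset) of the given type and reason to userspace.
 */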
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run,
                                     u32 type, u64 reason)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
                /* Take the lock of the vCPU being stopped, not the caller's */
                spin_lock(&tmp->arch.mp_state_lock);
                WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
                spin_unlock(&tmp->arch.mp_state_lock);
        }
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->system_event.ndata = 1;
        run->system_event.data[0] = reason;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

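/*
 * Complete an SBI call that was forwarded to userspace: copy the
 * returned values into a0/a1 and step past the ecall instruction.
 * The return_handled flag keeps this from running twice for the same
 * forwarded call across repeated KVM_RUN entries.
 */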
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}

static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        if (reg_val != 1 && reg_val != 0)
                return -EINVAL;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        scontext->ext_status[sext->ext_idx] = (reg_val) ?
                        KVM_RISCV_SBI_EXT_STATUS_ENABLED :
                        KVM_RISCV_SBI_EXT_STATUS_DISABLED;

        return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        *reg_val = scontext->ext_status[sext->ext_idx] ==
                                KVM_RISCV_SBI_EXT_STATUS_ENABLED;

        return 0;
}

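/*
 * The MULTI_EN/MULTI_DIS pseudo-registers pack one bit per extension,
 * BITS_PER_LONG extensions per register, so reg_num selects a word of
 * the enable bitmap.
 */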
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long reg_val, bool enable)
{
        unsigned long i, ext_id;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
        }

        return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long *reg_val)
{
        unsigned long i, ext_id, ext_val;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                ext_val = 0;
                riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
                if (ext_val)
                        *reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
        }

        return 0;
}

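/*
 * ONE_REG accessors for the SBI extension enable state.  Writes are
 * rejected with -EBUSY once the vCPU has run, so userspace must pick
 * the extension set before the first KVM_RUN.
 */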
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (vcpu->arch.ran_atleast_once)
                return -EBUSY;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
        case KVM_REG_RISCV_SBI_MULTI_EN:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -ENOENT;
        }

        return 0;
}

int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        reg_val = 0;
        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_SBI_MULTI_EN:
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
                if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
                        reg_val = ~reg_val;
                break;
        default:
                rc = -ENOENT;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

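/*
 * ONE_REG accessors for SBI extension state (KVM_REG_RISCV_SBI_STATE);
 * only the STA (steal-time accounting) subtype is handled here.
 */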
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
        default:
                return -EINVAL;
        }

        return 0;
}

int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;
        int ret;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
                break;
        default:
                return -EINVAL;
        }

        if (ret)
                return ret;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

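/*
 * Map an ecall's extension ID (a7) to a registered handler.  Entries
 * with ext_idx at or beyond KVM_RISCV_SBI_EXT_MAX (i.e. the base
 * extension) skip the per-vCPU enable check; all others must be in
 * the ENABLED state.
 */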
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->extid_start <= extid && ext->extid_end >= extid) {
                        if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
                            scontext->ext_status[entry->ext_idx] ==
                                                KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                                return ext;

                        return NULL;
                }
        }

        return NULL;
}

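/*
 * Top-level SBI ecall dispatch.  Return convention: < 0 exits KVM_RUN
 * with an error, 0 exits to userspace for further handling, and 1
 * resumes the guest.
 */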
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = {0};
        struct kvm_vcpu_sbi_return sbi_ret = {
                .out_val = 0,
                .err_val = 0,
                .utrap = &utrap,
        };
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &sbi_ret);
        } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /*
         * When the SBI extension returns a Linux error code, it exits the ioctl
         * loop and forwards the error to userspace.
         */
        if (ret < 0) {
                next_sepc = false;
                goto ecall_done;
        }

        /* Handle special error cases, i.e. trap, exit, or userspace forward */
        if (sbi_ret.utrap->scause) {
                /* No need to increment sepc or exit ioctl loop */
                ret = 1;
                sbi_ret.utrap->sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (sbi_ret.uexit) {
                next_sepc = false;
                ret = 0;
        } else {
                cp->a0 = sbi_ret.err_val;
                ret = 1;
        }
ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        /* a1 should only be updated when we continue the ioctl loop */
        if (!ext_is_v01 && ret == 1)
                cp->a1 = sbi_ret.out_val;

        return ret;
}

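/*
 * Called at vCPU creation: probe each extension and record whether it
 * is available and, if so, whether it starts out enabled or disabled.
 */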
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->probe && !ext->probe(vcpu)) {
                        scontext->ext_status[entry->ext_idx] =
                                KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
                        continue;
                }

                scontext->ext_status[entry->ext_idx] = ext->default_disabled ?
                                        KVM_RISCV_SBI_EXT_STATUS_DISABLED :
                                        KVM_RISCV_SBI_EXT_STATUS_ENABLED;
        }
}
