TOMOYO Linux Cross Reference
Linux/arch/riscv/kvm/vcpu_vector.c


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 SiFive
 *
 * Authors:
 *     Vincent Chen <vincent.chen@sifive.com>
 *     Greentime Hu <greentime.hu@sifive.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#ifdef CONFIG_RISCV_ISA_V
/*
 * Reset the guest vector state: when the guest ISA includes V, mark
 * sstatus.VS "Initial" and zero the saved register file; otherwise keep
 * the vector unit turned off for the guest.
 */
void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
        unsigned long *isa = vcpu->arch.isa;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

        cntx->sstatus &= ~SR_VS;
        if (riscv_isa_extension_available(isa, v)) {
                cntx->sstatus |= SR_VS_INITIAL;
                WARN_ON(!cntx->vector.datap);
                memset(cntx->vector.datap, 0, riscv_v_vsize);
        } else {
                cntx->sstatus |= SR_VS_OFF;
        }
}

/* Mark the guest vector state Clean: context saved, unit still enabled. */
static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
{
        cntx->sstatus &= ~SR_VS;
        cntx->sstatus |= SR_VS_CLEAN;
}

/*
 * Lazily save the guest vector registers: only write them out when the
 * guest actually used the vector unit (sstatus.VS is Dirty), then mark
 * the state Clean.
 */
void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
                                      unsigned long *isa)
{
        if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
                if (riscv_isa_extension_available(isa, v))
                        __kvm_riscv_vector_save(cntx);
                kvm_riscv_vcpu_vector_clean(cntx);
        }
}

/* Restore the guest vector registers unless the guest's vector unit is off. */
void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
                                         unsigned long *isa)
{
        if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
                if (riscv_isa_extension_available(isa, v))
                        __kvm_riscv_vector_restore(cntx);
                kvm_riscv_vcpu_vector_clean(cntx);
        }
}

void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
        /* No need to check host sstatus as it can be modified outside */
        if (riscv_isa_extension_available(NULL, v))
                __kvm_riscv_vector_save(cntx);
}

void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
        if (riscv_isa_extension_available(NULL, v))
                __kvm_riscv_vector_restore(cntx);
}

int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
                                        struct kvm_cpu_context *cntx)
{
        cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
        if (!cntx->vector.datap)
                return -ENOMEM;
        /* riscv_v_vsize covers all 32 vector registers, so vlenb is 1/32 of it. */
        cntx->vector.vlenb = riscv_v_vsize / 32;

        vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
        if (!vcpu->arch.host_context.vector.datap)
                return -ENOMEM;

        return 0;
}

void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
        kfree(vcpu->arch.guest_reset_context.vector.datap);
        kfree(vcpu->arch.host_context.vector.datap);
}
#endif

/*
 * Translate a KVM_REG_RISCV_VECTOR register number into the address of the
 * backing field in the guest context: numbers below KVM_REG_RISCV_VECTOR_REG(0)
 * select the vector CSRs, while 0..31 select the vector registers themselves.
 */
static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
                                    unsigned long reg_num,
                                    size_t reg_size,
                                    void **reg_addr)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        size_t vlenb = riscv_v_vsize / 32;

        if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
                if (reg_size != sizeof(unsigned long))
                        return -EINVAL;
                switch (reg_num) {
                case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
                        *reg_addr = &cntx->vector.vstart;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
                        *reg_addr = &cntx->vector.vl;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
                        *reg_addr = &cntx->vector.vtype;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
                        *reg_addr = &cntx->vector.vcsr;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb):
                        *reg_addr = &cntx->vector.vlenb;
                        break;
                case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
                default:
                        return -ENOENT;
                }
        } else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
                if (reg_size != vlenb)
                        return -EINVAL;
                *reg_addr = cntx->vector.datap +
                            (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
        } else {
                return -ENOENT;
        }

        return 0;
}

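/*
 * For reference (paraphrased from arch/riscv/include/uapi/asm/kvm.h; check
 * that header for the exact definitions, this block is not part of this file):
 * the register-number encodings that kvm_riscv_vcpu_vreg_addr() compares
 * against look roughly like this, which is why the CSR indices always sort
 * below KVM_REG_RISCV_VECTOR_REG(0):
 *
 *   #define KVM_REG_RISCV_VECTOR_CSR_REG(name) \
 *           (offsetof(struct __riscv_v_ext_state, name) / sizeof(unsigned long))
 *   #define KVM_REG_RISCV_VECTOR_REG(n) \
 *           ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
 */
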
int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        unsigned long *isa = vcpu->arch.isa;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_VECTOR);
        size_t reg_size = KVM_REG_SIZE(reg->id);
        void *reg_addr;
        int rc;

        if (!riscv_isa_extension_available(isa, v))
                return -ENOENT;

        rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
        if (rc)
                return rc;

        if (copy_to_user(uaddr, reg_addr, reg_size))
                return -EFAULT;

        return 0;
}

int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        unsigned long *isa = vcpu->arch.isa;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_VECTOR);
        size_t reg_size = KVM_REG_SIZE(reg->id);
        void *reg_addr;
        int rc;

        if (!riscv_isa_extension_available(isa, v))
                return -ENOENT;

        if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) {
                struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
                unsigned long reg_val;

                /* vlenb is fixed by the host; accept only a write of the current value. */
                if (copy_from_user(&reg_val, uaddr, reg_size))
                        return -EFAULT;
                if (reg_val != cntx->vector.vlenb)
                        return -EINVAL;

                return 0;
        }

        rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
        if (rc)
                return rc;

        if (copy_from_user(reg_addr, uaddr, reg_size))
                return -EFAULT;

        return 0;
}

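As a usage illustration (not part of the file above): userspace reaches these
registers through the generic KVM_GET_ONE_REG / KVM_SET_ONE_REG ioctls. The
sketch below is a minimal, hypothetical VMM-side helper, assuming an rv64 host,
an already-created vCPU file descriptor, and a guest ISA that includes V; the
helper name read_guest_vlenb() is made up for illustration. It reads the guest
vlenb CSR, which kvm_riscv_vcpu_set_reg_vector() above treats as read-only.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>  /* struct kvm_one_reg, KVM_GET_ONE_REG; pulls in <asm/kvm.h> */

static int read_guest_vlenb(int vcpu_fd, uint64_t *out)
{
        struct kvm_one_reg reg = {
                /* Vector CSRs are unsigned long sized, i.e. U64 on rv64. */
                .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                      KVM_REG_RISCV_VECTOR |
                      KVM_REG_RISCV_VECTOR_CSR_REG(vlenb),
                .addr = (uint64_t)(uintptr_t)out,
        };

        /* Returns -1 with errno == ENOENT when the vCPU's ISA has no V. */
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

Whole vector registers (KVM_REG_RISCV_VECTOR_REG(0) through (31)) are accessed
the same way, except that the KVM_REG_SIZE field of the ID must encode vlenb
bytes and the user buffer must be at least that large, matching the reg_size
checks in kvm_riscv_vcpu_vreg_addr().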
