TOMOYO Linux Cross Reference
Linux/arch/powerpc/kvm/emulate_loadstore.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

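/*
 * Each facility-check helper below queues the corresponding "facility
 * unavailable" interrupt if the guest has that facility disabled in its
 * MSR, in which case the caller bails out of emulation.  SRR1_PREFIXED
 * is forwarded so the queued interrupt reports whether the faulting
 * instruction was a prefixed (8-byte) instruction.
 */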
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	ppc_inst_t inst;
	enum emulation_result emulated = EMULATE_FAIL;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

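	/*
	 * Reset the per-access MMIO emulation state; these fields are
	 * consumed later, when the MMIO access completes.
	 */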
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
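	/*
	 * analyse_instr() is the common powerpc instruction analyser from
	 * arch/powerpc/lib/sstep.c; it decodes the instruction into 'op'.
	 * The guest MSR is copied into regs.msr above so the analysis sees
	 * the guest's current endianness and addressing mode.
	 */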
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		vcpu->mmio_is_write = OP_IS_STORE(type);

		switch (type) {
		case LOAD:  {
			int instr_byte_swap = op.type & BYTEREV;

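			/*
			 * The load handlers take an "is_default_endian"
			 * argument, so a byte-reversed instruction (e.g.
			 * lwbrx) inverts it.
			 */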
			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

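			/*
			 * FPCONV marks the single-precision forms (e.g.
			 * lfs): the 4-byte value is expanded to double
			 * format when it lands in the FPR.
			 */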
			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

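			/*
			 * The offset is the index of the accessed element
			 * within the 16-byte VMX register, taken from the
			 * low nibble of the effective address.
			 */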
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

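			/*
			 * The generic MMIO path moves at most 8 bytes per
			 * access, so a full 16-byte load is split into two
			 * 8-byte copies.
			 */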
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

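			/*
			 * VSX instructions whose operand lives in the upper
			 * VSRs (the VMX registers, VSRs 32-63) are gated by
			 * MSR_VEC rather than MSR_VSX.
			 */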
			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

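			/*
			 * mmio_copy_type tells the completion code how to
			 * scatter the incoming data into the target VSR:
			 * doubleword or word elements, optionally splatted
			 * across the whole register.
			 */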
			if (op.element_size == 8)  {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE: {
			int instr_byte_swap = op.type & BYTEREV;

			emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
						       size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					kvmppc_get_fpr(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

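			/*
			 * As with STORE_FP, flush the guest's VMX state to
			 * vcpu->arch before reading it for the store.
			 */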
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx  */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

	trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);

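	/*
	 * ppc_inst_len() is 8 bytes for a prefixed instruction and 4
	 * otherwise, so prefixed loads and stores are stepped over
	 * correctly as well.
	 */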
	/* Advance past emulated instruction. */
	if (emulated != EMULATE_FAIL)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));

	return emulated;
}

