/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
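
/*
 * arch/powerpc/kernel/kvm_emul.S
 *
 * Code templates for KVM paravirtualization of privileged instructions
 * on PowerPC. When the kernel runs as a KVM guest (CONFIG_KVM_GUEST),
 * arch/powerpc/kernel/kvm.c copies these templates into kvm_tmp,
 * patches the placeholders marked by the *_offs symbols below, and
 * rewrites trapping instructions (mtmsrd, mtmsr, wrtee, wrteei,
 * mtsrin) into branches to the patched copies, which emulate the
 * instruction against the shared "magic page" instead of exiting to
 * the hypervisor.
 */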

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define KVM_MAGIC_PAGE          (-4096)
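
/*
 * The magic (shared) page sits at the very top of the guest effective
 * address space. Using "register" 0 as the base of a load/store means
 * a literal zero, so the sign-extended 16-bit displacement
 * KVM_MAGIC_PAGE + offs (i.e. -4096 + offs) reaches the page without
 * tying up a GPR.
 */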

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)   ld      reg, (offs)(reg2)
#define STL64(reg, offs, reg2)  std     reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)   lwz     reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)  stw     reg, (offs + 4)(reg2)
#endif
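
/*
 * The magic-page fields are 64 bits wide. 64-bit kernels access them
 * directly; 32-bit (big-endian) kernels only care about the low word,
 * which lives at offset +4.
 */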

#define SCRATCH_SAVE                                                    \
        /* Enable critical section. We are critical if                  \
           shared->critical == r1 */                                    \
        STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);              \
                                                                        \
        /* Save state */                                                \
        PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);          \
        PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);          \
        mfcr    r31;                                                    \
        stw     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
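
/*
 * While shared->critical equals the guest's r1, the hypervisor holds
 * back interrupt injection, so saving and restoring r30, r31 and CR
 * through the magic page is atomic with respect to interrupt delivery.
 * CR is saved because the emulation code clobbers it (andi., cmpwi).
 */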

#define SCRATCH_RESTORE                                                 \
        /* Restore state */                                             \
        PPC_LL  r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);          \
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);          \
        mtcr    r30;                                                    \
        PPC_LL  r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);          \
                                                                        \
        /* Disable critical section. We are critical if                 \
           shared->critical == r1 and r2 is always != r1 */             \
        STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

.global kvm_template_start
kvm_template_start:
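
/*
 * The region between kvm_template_start and kvm_template_end is the
 * source that kvm.c copies templates from; the boot-time scan over the
 * kernel text skips it, so the templates themselves never get patched
 * in place.
 */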

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

        SCRATCH_SAVE

        /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        lis     r30, (~(MSR_EE | MSR_RI))@h
        ori     r30, r30, (~(MSR_EE | MSR_RI))@l
        and     r31, r31, r30

        /* OR the register's (MSR_EE|MSR_RI) bits into the MSR */
kvm_emulate_mtmsrd_reg:
        ori     r30, r0, 0
        andi.   r30, r30, (MSR_EE|MSR_RI)
        or      r31, r31, r30

        /* Put MSR back into magic page */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_check

        /* Check if we may trigger an interrupt */
        andi.   r30, r30, MSR_EE
        beq     no_check

        SCRATCH_RESTORE

        /* Nag hypervisor: run the original (trapping) instruction so
           it can deliver the pending interrupt */
kvm_emulate_mtmsrd_orig_ins:
        tlbsync

        b       kvm_emulate_mtmsrd_branch

no_check:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsrd_branch:
        b       .
kvm_emulate_mtmsrd_end:

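/*
 * Each template exports its patch-site offsets (in instruction words
 * from the template start) plus its total length. When kvm.c (see the
 * kvm_patch_ins_* helpers) instantiates a template, it copies it into
 * kvm_tmp and then patches:
 *
 *   reg_offs      - the "ori r30, r0, 0" placeholder, to read the
 *                   trapping instruction's actual source register
 *                   (or a scratch slot if that register is r30/r31),
 *   orig_ins_offs - the placeholder that receives the original,
 *                   trapping instruction,
 *   branch_offs   - the "b ." placeholder, turned into a branch back
 *                   to the instruction after the patched site.
 *
 * The trapping instruction itself then becomes "b <copy>": e.g. a
 * guest "mtmsr r5" branches into a copy that reads r5, keeps the
 * magic-page MSR current and branches back.
 */
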
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
        .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
        .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
        .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
        .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4


#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
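
/*
 * EE and RI can be toggled purely in the magic page. A change to any
 * other MSR bit has to be visible to the hypervisor, so those cases
 * run the original mtmsr and trap.
 */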

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
        ori     r30, r0, 0
        xor     r31, r30, r31

        /* Check if we really need to execute mtmsr */
        LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
        and.    r31, r31, r30

        /* No critical bits changed? Maybe we can stay in the guest. */
        beq     maybe_stay_in_guest

do_mtmsr:

        SCRATCH_RESTORE

        /* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
        mtmsr   r0

        b       kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

        /* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
        ori     r30, r0, 0

        /* Put MSR into magic page because we don't call mtmsr */
        STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_mtmsr

        /* Check if we may trigger an interrupt */
        andi.   r31, r30, MSR_EE
        bne     do_mtmsr

no_mtmsr:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsr_branch:
        b       .
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
        .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
        .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
        .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
        .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
        .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

#ifdef CONFIG_BOOKE

/* also used for wrteei 1 */
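/*
 * For wrteei 1, kvm.c swaps the register placeholder below for a load
 * of MSR_EE into r30, so one template covers both the register and the
 * immediate form.
 */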
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
        ori     r30, r0, 0
        rlwimi  r31, r30, 0, MSR_EE

        /*
         * If MSR[EE] is now set, check for a pending interrupt.
         * We could skip this if MSR[EE] was already on, but that
         * should be rare, so don't bother.
         */
        andi.   r30, r30, MSR_EE

        /* Put MSR into magic page because we don't call wrtee */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        beq     no_wrtee

        /* Check if we have to fetch an interrupt */
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r30, 0
        bne     do_wrtee

no_wrtee:
        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrtee_branch:
        b       .

do_wrtee:
        SCRATCH_RESTORE

        /* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
        wrtee   r0

        b       kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
        .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
        .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
        .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
        .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

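/*
 * wrteei 0 only clears MSR_EE, and clearing EE can never unmask a
 * pending interrupt, so this template needs neither an interrupt check
 * nor a fallback to the trapping instruction.
 */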
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Remove MSR_EE from old MSR */
        rlwinm  r31, r31, 0, ~MSR_EE

        /* Write new MSR value back */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrteei_0_branch:
        b       .
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
        .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
        .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

#endif /* CONFIG_BOOKE */

#ifdef CONFIG_PPC_BOOK3S_32

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

        SCRATCH_SAVE

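        /*
         * With address translation enabled (MSR_IR/MSR_DR) the segment
         * register update must take effect immediately, so fall
         * through to the original, trapping mtsrin. In real mode it is
         * enough to update the shadow SR value in the magic page.
         */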
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        andi.   r31, r31, MSR_DR | MSR_IR
        beq     kvm_emulate_mtsrin_reg1

        SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
        nop
        b       kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
        /* r30 = (rX >> 26) & ~3, i.e. 4 * SR number */
        rlwinm  r30, r0, 6, 26, 29

kvm_emulate_mtsrin_reg2:
        stw     r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtsrin_branch:
        b       .
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
        .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
        .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
        .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
        .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
        .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

#endif /* CONFIG_PPC_BOOK3S_32 */

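/*
 * 64k of patch space; kvm_alloc() in arch/powerpc/kernel/kvm.c carves
 * the template copies out of this buffer.
 */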
        .balign 4
        .global kvm_tmp
kvm_tmp:
        .space  (64 * 1024)

.global kvm_tmp_end
kvm_tmp_end:

.global kvm_template_end
kvm_template_end:
