/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define KVM_MAGIC_PAGE		(-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs)(reg2)
#endif

#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

.global kvm_template_start
kvm_template_start:

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
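
/*
 * How these templates get used, roughly: the patching code in
 * arch/powerpc/kernel/kvm.c copies a template into the kvm_tmp area
 * and fixes up the instructions named by the *_offs words, which are
 * instruction indices into the template (hence the division by 4).
 * The _reg slot is rewritten to read the guest instruction's source
 * register, the _orig_ins slot receives the original (trapping)
 * instruction for the slow path, and the _branch slot becomes a branch
 * back to the instruction after the patch site.  As a hypothetical
 * example, a guest
 *	mtmsr	r5
 * ends up replaced by a branch into a kvm_tmp copy of kvm_emulate_mtmsr
 * whose _reg1/_reg2 slots have been rewritten to copy from r5.
 */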

#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

#ifdef CONFIG_BOOKE

/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already set, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
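
/*
 * wrteei 0 only ever clears MSR[EE].  Disabling interrupts can never
 * make a pending interrupt deliverable, so the template below needs
 * neither the pending-interrupt check nor an _orig_ins slow-path slot;
 * it just clears the bit in the magic page's copy of the MSR.
 */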

.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

#endif /* CONFIG_BOOKE */

#ifdef CONFIG_PPC_BOOK3S_32

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* rX >> 26 */
	rlwinm	r30, r0, 6, 26, 29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

#endif /* CONFIG_PPC_BOOK3S_32 */

.balign 4
.global kvm_tmp
kvm_tmp:
	.space	(64 * 1024)

.global kvm_tmp_end
kvm_tmp_end:

.global kvm_template_end
kvm_template_end:
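
/*
 * kvm_template_start and kvm_template_end bracket the templates so the
 * patcher can skip over its own template code when scanning the kernel
 * text for instructions to rewrite, and kvm_tmp is the 64k pool that
 * patched template copies are allocated from at runtime.
 */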