TOMOYO Linux Cross Reference
Linux/arch/x86/kvm/kvm_cache_regs.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

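/*
 * Control register bits that KVM can let the guest own, i.e. read
 * directly from hardware without an intercept.  The bits actually owned
 * by a given vCPU are tracked in vcpu->arch.cr{0,4}_guest_owned_bits.
 */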
#define KVM_POSSIBLE_CR0_GUEST_BITS     (X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS                               \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

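/*
 * CR0/CR4 bits whose modification requires reloading the guest's PDPTEs
 * (under PAE paging) or flushing the TLB, respectively.
 */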
#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

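/*
 * Generates kvm_<reg>_read()/kvm_<reg>_write() pairs, e.g. kvm_rax_read()
 * and kvm_rax_write().  The vendor modules keep the GPRs up to date in
 * vcpu->arch.regs across VM-Exit/VM-Entry, so the generated accessors
 * touch the cache directly without consulting regs_avail.
 */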
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                 \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{                                                                             \
        return vcpu->arch.regs[VCPU_REGS_##uname];                            \
}                                                                             \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,        \
                                                unsigned long val)            \
{                                                                             \
        vcpu->arch.regs[VCPU_REGS_##uname] = val;                             \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

/*
 * avail  dirty
 * 0      0       register in VMCS/VMCB
 * 0      1       *INVALID*
 * 1      0       register in vcpu->arch
 * 1      1       register in vcpu->arch, needs to be stored back
 */
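/*
 * regs_avail and regs_dirty are bitmaps indexed by enum kvm_reg; the
 * states above only matter for registers that the vendor module caches
 * lazily, e.g. RIP and RSP on VMX, the PDPTRs, and CR0/CR3/CR4.
 */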
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
                                             enum kvm_reg reg)
{
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
                                         enum kvm_reg reg)
{
        return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

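/*
 * Marking a register dirty also marks it available: per the table above,
 * a dirty value lives in vcpu->arch and must be written back to the
 * VMCS/VMCB before re-entering the guest.
 */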
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
                                           enum kvm_reg reg)
{
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

/*
 * kvm_register_test_and_mark_available() is a special snowflake that uses an
 * arch bitop directly to avoid the explicit instrumentation that comes with
 * the generic bitops.  This allows code that cannot be instrumented (noinstr
 * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
                                                                 enum kvm_reg reg)
{
        return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other words,
 * odds are good you shouldn't be using the raw variants.
 */
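/*
 * (The mode-aware wrappers, kvm_register_read()/kvm_register_write() in
 * x86.h, truncate the value to 32 bits outside 64-bit mode.)
 */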
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
                return 0;

        if (!kvm_register_is_available(vcpu, reg))
                kvm_x86_call(cache_reg)(vcpu, reg);

        return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
                                          unsigned long val)
{
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
                return;

        vcpu->arch.regs[reg] = val;
        kvm_register_mark_dirty(vcpu, reg);
}

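/*
 * RIP and RSP go through the raw helpers because they may live in the
 * VMCS on Intel hardware; .cache_reg() pulls them into vcpu->arch.regs
 * on first use after a VM-Exit.
 */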
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        might_sleep();  /* on SVM, caching the PDPTRs may read guest memory */

        if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
                kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

        return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
        vcpu->arch.walk_mmu->pdptrs[index] = value;
}

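/*
 * If any requested bit is guest-owned, the cached value may be stale;
 * refresh the cache from hardware via .cache_reg() before masking.
 */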
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
        if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
            !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
                kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
        return vcpu->arch.cr0 & mask;
}

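/*
 * Usage sketch (illustrative): kvm_is_cr0_bit_set(vcpu, X86_CR0_PG)
 * tests whether the guest has paging enabled.
 */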
static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
                                               unsigned long cr0_bit)
{
        BUILD_BUG_ON(!is_power_of_2(cr0_bit));

        return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
        if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
            !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
                kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
        return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
                                               unsigned long cr4_bit)
{
        BUILD_BUG_ON(!is_power_of_2(cr4_bit));

        return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
        if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
                kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
        return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, ~0UL);
}

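/*
 * Assemble the 64-bit EDX:EAX pair as produced by instructions such as
 * RDMSR and RDTSC; "& -1u" truncates each GPR to its low 32 bits.
 */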
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
        return (kvm_rax_read(vcpu) & -1u)
                | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

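/*
 * "Guest mode" refers to running a nested guest (L2); HF_GUEST_MASK and
 * the guest_mode stat track whether the vCPU is currently in L2.
 */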
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags |= HF_GUEST_MASK;
        vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags &= ~HF_GUEST_MASK;

        if (vcpu->arch.load_eoi_exitmap_pending) {
                vcpu->arch.load_eoi_exitmap_pending = false;
                kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
        }

        vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif
