TOMOYO Linux Cross Reference
Linux/arch/x86/kvm/vmx/vmx.h

Version: linux-6.12-rc7

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>
#include <asm/perf_event.h>
#include <asm/posted_intr.h>

#include "capabilities.h"
#include "../kvm_cache_regs.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "../cpuid.h"
#include "run_flags.h"
#include "../mmu.h"

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS 7
#else
#define MAX_NR_USER_RETURN_MSRS 4
#endif

#define MAX_NR_LOADSTORE_MSRS   8

struct vmx_msrs {
        unsigned int            nr;
        struct vmx_msr_entry    val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
        bool load_into_hardware;
        u64 data;
        u64 mask;
};

enum segment_cache_field {
        SEG_FIELD_SEL = 0,
        SEG_FIELD_BASE = 1,
        SEG_FIELD_LIMIT = 2,
        SEG_FIELD_AR = 3,

        SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE         4

struct pt_ctx {
        u64 ctl;
        u64 status;
        u64 output_base;
        u64 output_mask;
        u64 cr3_match;
        u64 addr_a[RTIT_ADDR_RANGE];
        u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
        u64 ctl_bitmask;
        u32 num_address_ranges;
        u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
        struct pt_ctx host;
        struct pt_ctx guest;
};

union vmx_exit_reason {
        struct {
                u32     basic                   : 16;
                u32     reserved16              : 1;
                u32     reserved17              : 1;
                u32     reserved18              : 1;
                u32     reserved19              : 1;
                u32     reserved20              : 1;
                u32     reserved21              : 1;
                u32     reserved22              : 1;
                u32     reserved23              : 1;
                u32     reserved24              : 1;
                u32     reserved25              : 1;
                u32     bus_lock_detected       : 1;
                u32     enclave_mode            : 1;
                u32     smi_pending_mtf         : 1;
                u32     smi_from_vmx_root       : 1;
                u32     reserved30              : 1;
                u32     failed_vmentry          : 1;
        };
        u32 full;
};
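
/*
 * Usage sketch (illustrative, not part of the upstream header): a caller
 * that has just taken a VM-Exit can decode the raw exit reason through
 * this union, e.g.:
 *
 *      union vmx_exit_reason reason;
 *
 *      reason.full = vmcs_read32(VM_EXIT_REASON);
 *      if (reason.failed_vmentry)
 *              ...consult reason.basic for the basic exit reason...
 *
 * KVM caches the decoded value in vcpu_vmx.exit_reason (see below).
 */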

struct lbr_desc {
        /* Basic info about guest LBR records. */
        struct x86_pmu_lbr records;

        /*
         * Emulate LBR feature via passthrough LBR registers when the
         * per-vcpu guest LBR event is scheduled on the current pcpu.
         *
         * The records may be inaccurate if the host reclaims the LBR.
         */
        struct perf_event *event;

        /* True if LBRs are marked as not intercepted in the MSR bitmap */
        bool msr_passthrough;
};

extern struct x86_pmu_lbr vmx_lbr_caps;

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
        /* Has the level1 guest done vmxon? */
        bool vmxon;
        gpa_t vmxon_ptr;
        bool pml_full;

        /* The guest-physical address of the current VMCS L1 keeps for L2 */
        gpa_t current_vmptr;
        /*
         * Cache of the guest's VMCS, existing outside of guest memory.
         * Loaded from guest memory during VMPTRLD. Flushed to guest
         * memory during VMCLEAR and VMPTRLD.
         */
        struct vmcs12 *cached_vmcs12;
        /*
         * Cache of the guest's shadow VMCS, existing outside of guest
         * memory. Loaded from guest memory during VM entry. Flushed
         * to guest memory during VM exit.
         */
        struct vmcs12 *cached_shadow_vmcs12;

        /*
         * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
         */
        struct gfn_to_hva_cache shadow_vmcs12_cache;

        /*
         * GPA to HVA cache for VMCS12
         */
        struct gfn_to_hva_cache vmcs12_cache;

        /*
         * Indicates if the shadow vmcs or enlightened vmcs must be updated
         * with the data held by struct vmcs12.
         */
        bool need_vmcs12_to_shadow_sync;
        bool dirty_vmcs12;

        /*
         * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
         * changes in MSR bitmap for L1 or switching to a different L2. Note,
         * this flag can only be used reliably in conjunction with a paravirt L1
         * which informs L0 whether any changes to MSR bitmap for L2 were done
         * on its side.
         */
        bool force_msr_bitmap_recalc;

        /*
         * Indicates lazily loaded guest state has not yet been decached from
         * vmcs02.
         */
        bool need_sync_vmcs02_to_vmcs12_rare;

        /*
         * vmcs02 has been initialized, i.e. state that is constant for
         * vmcs02 has been written to the backing VMCS.  Initialization
         * is delayed until L1 actually attempts to run a nested VM.
         */
        bool vmcs02_initialized;

        bool change_vmcs01_virtual_apic_mode;
        bool reload_vmcs01_apic_access_page;
        bool update_vmcs01_cpu_dirty_logging;
        bool update_vmcs01_apicv_status;

        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
         * use it. However, VMX features available to L1 will be limited based
         * on what the enlightened VMCS supports.
         */
        bool enlightened_vmcs_enabled;

        /* L2 must run next, and mustn't decide whether to exit to L1. */
        bool nested_run_pending;

        /* Pending MTF VM-exit into L1.  */
        bool mtf_pending;

        struct loaded_vmcs vmcs02;

        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct kvm_host_map apic_access_page_map;
        struct kvm_host_map virtual_apic_map;
        struct kvm_host_map pi_desc_map;

        struct kvm_host_map msr_bitmap_map;

        struct pi_desc *pi_desc;
        bool pi_pending;
        u16 posted_intr_nv;

        struct hrtimer preemption_timer;
        u64 preemption_timer_deadline;
        bool has_preemption_timer_deadline;
        bool preemption_timer_expired;

        /*
         * Used to snapshot MSRs that are conditionally loaded on VM-Enter in
         * order to propagate the guest's pre-VM-Enter value.  For
         * emulation of VMLAUNCH/VMRESUME, the snapshot will be of L1's value.
         * For KVM_SET_NESTED_STATE, the snapshot is of L2's value, _if_
         * userspace restores MSRs before nested state.  If userspace restores
         * MSRs after nested state, the snapshot holds garbage, but KVM can't
         * detect that, and the garbage value in vmcs02 will be overwritten by
         * MSR restoration in any case.
         */
        u64 pre_vmenter_debugctl;
        u64 pre_vmenter_bndcfgs;

        /* to migrate it to L1 if L2 writes to L1's CR8 directly */
        int l1_tpr_threshold;

        u16 vpid02;
        u16 last_vpid;

        struct nested_vmx_msrs msrs;

        /* SMM related state */
        struct {
                /* in VMX operation on SMM entry? */
                bool vmxon;
                /* in guest mode on SMM entry? */
                bool guest_mode;
        } smm;

#ifdef CONFIG_KVM_HYPERV
        gpa_t hv_evmcs_vmptr;
        struct kvm_host_map hv_evmcs_map;
        struct hv_enlightened_vmcs *hv_evmcs;
#endif
};

struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        u8                    fail;
        u8                    x2apic_msr_bitmap_mode;

        /*
         * If true, host state has been stored in vmx->loaded_vmcs for
         * the CPU registers that only need to be switched when transitioning
         * to/from the kernel, and the registers have been loaded with guest
         * values.  If false, host state is loaded in the CPU registers
         * and vmx->loaded_vmcs->host_state is invalid.
         */
        bool                  guest_state_loaded;

        unsigned long         exit_qualification;
        u32                   exit_intr_info;
        u32                   idt_vectoring_info;
        ulong                 rflags;

        /*
         * User return MSRs are always emulated when enabled in the guest, but
         * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
         * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
         * be loaded into hardware if those conditions aren't met.
         */
        struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
        bool                  guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
        u64                   msr_guest_kernel_gs_base;
#endif

        u64                   spec_ctrl;
        u32                   msr_ia32_umwait_control;

        /*
         * loaded_vmcs points to the VMCS currently used in this vcpu. For a
         * non-nested (L1) guest, it always points to vmcs01. For a nested
         * guest (L2), it points to a different, vmcs02.
         */
        struct loaded_vmcs    vmcs01;
        struct loaded_vmcs   *loaded_vmcs;

        struct msr_autoload {
                struct vmx_msrs guest;
                struct vmx_msrs host;
        } msr_autoload;

        struct msr_autostore {
                struct vmx_msrs guest;
        } msr_autostore;

        struct {
                int vm86_active;
                ulong save_rflags;
                struct kvm_segment segs[8];
        } rmode;
        struct {
                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } seg[8];
        } segment_cache;
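        /*
         * Illustrative note: validity is tracked with one bit per
         * (segment, field) pair; the natural encoding, and the one the
         * segment-cache helpers in vmx.c appear to use, is bit
         * (seg * SEG_FIELD_NR + field) of bitmask.  Zeroing bitmask
         * (vmx_segment_cache_clear(), below) invalidates every cached
         * field at once.
         */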
        int vpid;
        bool emulation_required;

        union vmx_exit_reason exit_reason;

        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;

        /* Used if this vCPU is waiting for PI notification wakeup. */
        struct list_head pi_wakeup_list;

        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;

        /* Dynamic PLE window. */
        unsigned int ple_window;
        bool ple_window_dirty;

        /* Support for PML */
#define PML_ENTITY_NUM          512
        struct page *pml_pg;

        /* apic deadline value in host tsc */
        u64 hv_deadline_tsc;

        unsigned long host_debugctlmsr;

        /*
         * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
         * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
         * in msr_ia32_feature_control_valid_bits.
         */
        u64 msr_ia32_feature_control;
        u64 msr_ia32_feature_control_valid_bits;
        /* SGX Launch Control public key hash */
        u64 msr_ia32_sgxlepubkeyhash[4];
        u64 msr_ia32_mcu_opt_ctrl;
        bool disable_fb_clear;

        struct pt_desc pt_desc;
        struct lbr_desc lbr_desc;

        /* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS   16
        struct {
                DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
                DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
        } shadow_msr_intercept;

        /* ve_info must be page aligned. */
        struct vmx_ve_information *ve_info;
};

struct kvm_vmx {
        struct kvm kvm;

        unsigned int tss_addr;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;
        /* Posted Interrupt Descriptor (PID) table for IPI virtualization */
        u64 *pid_table;
};

void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
                        struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
                        unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
                       int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
                    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
                                             int type, bool value)
{
        if (value)
                vmx_enable_intercept_for_msr(vcpu, msr, type);
        else
                vmx_disable_intercept_for_msr(vcpu, msr, type);
}
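
/*
 * Usage sketch (illustrative; guest_has_spec_ctrl is a hypothetical
 * placeholder, not a real KVM symbol):
 *
 *      vmx_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_RW,
 *                                !guest_has_spec_ctrl);
 *
 * i.e. value == true forces interception (VM-Exit on access), while
 * value == false leaves the MSR passed through to the guest.
 */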

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff.  The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes.  The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes.  MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
                                                       u32 msr)                \
{                                                                              \
        int f = sizeof(unsigned long);                                         \
                                                                               \
        if (msr <= 0x1fff)                                                     \
                return bitop##_bit(msr, bitmap + base / f);                    \
        else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))                   \
                return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
        return (rtype)true;                                                    \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)                  \
        __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)     \
        __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
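
/*
 * Worked example (illustrative): MSR_EFER (0xc0000080) is a "high" MSR, so
 * its tracked bit is (0xc0000080 & 0x1fff) = 0x80.  Reads land in the
 * read-high region at byte 0x400 + 0x80/8 = 0x410; writes land in the
 * write-high region at byte 0xc00 + 0x80/8 = 0xc10.  A "low" MSR such as
 * IA32_SPEC_CTRL (0x48) uses bit 0x48 of the read-low (bytes 0-0x3ff) and
 * write-low (bytes 0x800-0xbff) regions, i.e. bytes 0x9 and 0x809.
 */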

static inline u8 vmx_get_rvi(void)
{
        return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}
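
/*
 * Illustrative note: the 16-bit guest interrupt status holds RVI (Requesting
 * Virtual Interrupt) in bits 7:0 and SVI (Servicing Virtual Interrupt) in
 * bits 15:8, hence the "& 0xff" above to extract RVI.
 */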

#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS                            \
        (VM_ENTRY_LOAD_DEBUG_CONTROLS)
#ifdef CONFIG_X86_64
        #define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS                      \
                (__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS |                 \
                 VM_ENTRY_IA32E_MODE)
#else
        #define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS                      \
                __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS                              \
        (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |                          \
         VM_ENTRY_LOAD_IA32_PAT |                                       \
         VM_ENTRY_LOAD_IA32_EFER |                                      \
         VM_ENTRY_LOAD_BNDCFGS |                                        \
         VM_ENTRY_PT_CONCEAL_PIP |                                      \
         VM_ENTRY_LOAD_IA32_RTIT_CTL)

#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS                             \
        (VM_EXIT_SAVE_DEBUG_CONTROLS |                                  \
         VM_EXIT_ACK_INTR_ON_EXIT)
#ifdef CONFIG_X86_64
        #define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS                       \
                (__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS |                  \
                 VM_EXIT_HOST_ADDR_SPACE_SIZE)
#else
        #define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS                       \
                __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
#endif
#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS                               \
              (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |                     \
               VM_EXIT_SAVE_IA32_PAT |                                  \
               VM_EXIT_LOAD_IA32_PAT |                                  \
               VM_EXIT_SAVE_IA32_EFER |                                 \
               VM_EXIT_SAVE_VMX_PREEMPTION_TIMER |                      \
               VM_EXIT_LOAD_IA32_EFER |                                 \
               VM_EXIT_CLEAR_BNDCFGS |                                  \
               VM_EXIT_PT_CONCEAL_PIP |                                 \
               VM_EXIT_CLEAR_IA32_RTIT_CTL)

#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL                      \
        (PIN_BASED_EXT_INTR_MASK |                                      \
         PIN_BASED_NMI_EXITING)
#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL                      \
        (PIN_BASED_VIRTUAL_NMIS |                                       \
         PIN_BASED_POSTED_INTR |                                        \
         PIN_BASED_VMX_PREEMPTION_TIMER)

#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL                    \
        (CPU_BASED_HLT_EXITING |                                        \
         CPU_BASED_CR3_LOAD_EXITING |                                   \
         CPU_BASED_CR3_STORE_EXITING |                                  \
         CPU_BASED_UNCOND_IO_EXITING |                                  \
         CPU_BASED_MOV_DR_EXITING |                                     \
         CPU_BASED_USE_TSC_OFFSETTING |                                 \
         CPU_BASED_MWAIT_EXITING |                                      \
         CPU_BASED_MONITOR_EXITING |                                    \
         CPU_BASED_INVLPG_EXITING |                                     \
         CPU_BASED_RDPMC_EXITING |                                      \
         CPU_BASED_INTR_WINDOW_EXITING)

#ifdef CONFIG_X86_64
        #define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL              \
                (__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL |         \
                 CPU_BASED_CR8_LOAD_EXITING |                           \
                 CPU_BASED_CR8_STORE_EXITING)
#else
        #define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL              \
                __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
#endif

#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL                      \
        (CPU_BASED_RDTSC_EXITING |                                      \
         CPU_BASED_TPR_SHADOW |                                         \
         CPU_BASED_USE_IO_BITMAPS |                                     \
         CPU_BASED_MONITOR_TRAP_FLAG |                                  \
         CPU_BASED_USE_MSR_BITMAPS |                                    \
         CPU_BASED_NMI_WINDOW_EXITING |                                 \
         CPU_BASED_PAUSE_EXITING |                                      \
         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |                        \
         CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)

#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL                      \
        (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |                      \
         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |                        \
         SECONDARY_EXEC_WBINVD_EXITING |                                \
         SECONDARY_EXEC_ENABLE_VPID |                                   \
         SECONDARY_EXEC_ENABLE_EPT |                                    \
         SECONDARY_EXEC_UNRESTRICTED_GUEST |                            \
         SECONDARY_EXEC_PAUSE_LOOP_EXITING |                            \
         SECONDARY_EXEC_DESC |                                          \
         SECONDARY_EXEC_ENABLE_RDTSCP |                                 \
         SECONDARY_EXEC_ENABLE_INVPCID |                                \
         SECONDARY_EXEC_APIC_REGISTER_VIRT |                            \
         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |                         \
         SECONDARY_EXEC_SHADOW_VMCS |                                   \
         SECONDARY_EXEC_ENABLE_XSAVES |                                 \
         SECONDARY_EXEC_RDSEED_EXITING |                                \
         SECONDARY_EXEC_RDRAND_EXITING |                                \
         SECONDARY_EXEC_ENABLE_PML |                                    \
         SECONDARY_EXEC_TSC_SCALING |                                   \
         SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |                         \
         SECONDARY_EXEC_PT_USE_GPA |                                    \
         SECONDARY_EXEC_PT_CONCEAL_VMX |                                \
         SECONDARY_EXEC_ENABLE_VMFUNC |                                 \
         SECONDARY_EXEC_BUS_LOCK_DETECTION |                            \
         SECONDARY_EXEC_NOTIFY_VM_EXITING |                             \
         SECONDARY_EXEC_ENCLS_EXITING |                                 \
         SECONDARY_EXEC_EPT_VIOLATION_VE)

#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL                       \
        (TERTIARY_EXEC_IPI_VIRT)

#define BUILD_CONTROLS_SHADOW(lname, uname, bits)                       \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \
{                                                                       \
        if (vmx->loaded_vmcs->controls_shadow.lname != val) {           \
                vmcs_write##bits(uname, val);                           \
                vmx->loaded_vmcs->controls_shadow.lname = val;          \
        }                                                               \
}                                                                       \
static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs) \
{                                                                       \
        return vmcs->controls_shadow.lname;                             \
}                                                                       \
static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx)        \
{                                                                       \
        return __##lname##_controls_get(vmx->loaded_vmcs);              \
}                                                                       \
static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \
{                                                                       \
        BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
        lname##_controls_set(vmx, lname##_controls_get(vmx) | val);     \
}                                                                       \
static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
{                                                                       \
        BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
        lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);    \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32)
BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
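
/*
 * Usage sketch (illustrative): each BUILD_CONTROLS_SHADOW() invocation
 * above generates <lname>_controls_{get,set,setbit,clearbit}() accessors
 * that only write the VMCS field when the shadowed value changes, e.g.:
 *
 *      pin_controls_setbit(vmx, PIN_BASED_VMX_PREEMPTION_TIMER);
 *      exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
 *
 * The BUILD_BUG_ON() in the bit helpers rejects, at compile time, any bit
 * that is neither required nor optional for KVM.
 */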

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand.  Other registers not listed here are updated in
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET  ((1 << VCPU_REGS_RIP) |         \
                                (1 << VCPU_REGS_RSP) |          \
                                (1 << VCPU_EXREG_RFLAGS) |      \
                                (1 << VCPU_EXREG_PDPTR) |       \
                                (1 << VCPU_EXREG_SEGMENTS) |    \
                                (1 << VCPU_EXREG_CR0) |         \
                                (1 << VCPU_EXREG_CR3) |         \
                                (1 << VCPU_EXREG_CR4) |         \
                                (1 << VCPU_EXREG_EXIT_INFO_1) | \
                                (1 << VCPU_EXREG_EXIT_INFO_2))

static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
{
        unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;

        /*
         * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
         * in order to construct shadow PTEs with the correct protections.
         * Note!  CR0.WP technically can be passed through to the guest if
         * paging is disabled, but checking CR0.PG would generate a cyclical
         * dependency of sorts due to forcing the caller to ensure CR0 holds
         * the correct value prior to determining which CR0 bits can be owned
         * by L1.  Keep it simple and limit the optimization to EPT.
         */
        if (!enable_ept)
                bits &= ~X86_CR0_WP;
        return bits;
}

static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_vmx, kvm);
}

static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
{
        return &to_vmx(vcpu)->lbr_desc;
}

static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
{
        return &vcpu_to_lbr_desc(vcpu)->records;
}

static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
        return !!vcpu_to_lbr_records(vcpu)->nr;
}

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
                vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

        return vmx->exit_qualification;
}

static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2))
                vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

        return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
        return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
                              GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
        return secondary_exec_controls_get(vmx) &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
        if (!enable_ept)
                return true;

        return allow_smaller_maxphyaddr &&
               cpuid_maxphyaddr(vcpu) < kvm_host.maxphyaddr;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
        return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
            (secondary_exec_controls_get(to_vmx(vcpu)) &
            SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
        return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
        return (vmx_instr_info >> 28) & 0xf;
}
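
/*
 * Worked example (illustrative): the "reg2" operand occupies bits 31:28 of
 * the VMX instruction info field, so vmx_instr_info = 0xd0001234 yields
 * 0xd, i.e. register 13 (R13 in 64-bit mode).
 */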

static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
{
        return  lapic_in_kernel(vcpu) && enable_ipiv;
}

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
        vmx->segment_cache.bitmask = 0;
}

#endif /* __KVM_X86_VMX_H */
