TOMOYO Linux Cross Reference
Linux/arch/x86/kvm/mmu/spte.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

bool __read_mostly enable_mmio_caching = true;
static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
EXPORT_SYMBOL_GPL(enable_mmio_caching);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_memtype_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

static u8 __init kvm_get_host_maxphyaddr(void)
{
        /*
         * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
         * in CPU detection code, but the processor treats those reduced bits as
         * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
         * the physical address bits reported by CPUID, i.e. the raw MAXPHYADDR,
         * when reasoning about CPU behavior with respect to MAXPHYADDR.
         */
        if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
                return cpuid_eax(0x80000008) & 0xff;

        /*
         * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
         * custom CPUID.  Proceed with whatever the kernel found since these features
         * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
         */
        return boot_cpu_data.x86_phys_bits;
}

void __init kvm_mmu_spte_module_init(void)
{
        /*
         * Snapshot userspace's desire to allow MMIO caching.  Whether or not
         * KVM can actually enable MMIO caching depends on vendor-specific
         * hardware capabilities and other module params that can't be resolved
         * until the vendor module is loaded, i.e. enable_mmio_caching can and
         * will change when the vendor module is (re)loaded.
         */
        allow_mmio_caching = enable_mmio_caching;

        kvm_host.maxphyaddr = kvm_get_host_maxphyaddr();
}

static u64 generation_mmio_spte_mask(u64 gen)
{
        u64 mask;

        WARN_ON_ONCE(gen & ~MMIO_SPTE_GEN_MASK);

        mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
        mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
        return mask;
}
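
/*
 * Illustrative note (editor's annotation, not part of the upstream file):
 * the memslot generation doesn't fit in one contiguous run of spare SPTE
 * bits, so it is split across the two disjoint ranges described by the
 * MMIO_SPTE_GEN_LOW_* and MMIO_SPTE_GEN_HIGH_* definitions in spte.h.
 * Packing (above) and unpacking (get_mmio_spte_generation() in spte.h)
 * are exact inverses for any generation that fits in MMIO_SPTE_GEN_MASK,
 * i.e. conceptually:
 *
 *         gen == get_mmio_spte_generation(generation_mmio_spte_mask(gen));
 *
 * The WARN_ON_ONCE() above catches callers that pass a wider generation,
 * which would silently alias a different generation number.
 */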

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 spte = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;

        WARN_ON_ONCE(!vcpu->kvm->arch.shadow_mmio_value);

        access &= shadow_mmio_access_mask;
        spte |= vcpu->kvm->arch.shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

        return spte;
}
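
/*
 * Editor's annotation (illustrative restatement of make_mmio_spte() above,
 * not part of the upstream file).  The resulting MMIO SPTE is composed of:
 *
 *   - the memslot generation, split across the low/high generation ranges,
 *     so a stale cached MMIO SPTE can be detected after memslot changes;
 *   - the per-VM shadow_mmio_value plus the cached access bits (masked by
 *     shadow_mmio_access_mask), which make the entry recognizable as an
 *     MMIO SPTE on the next fault;
 *   - the GPA, so the faulting address can be recovered without re-walking
 *     the guest page tables (see get_mmio_spte_gfn() in spte.h);
 *   - shadow_nonpresent_or_rsvd_mask for the L1TF mitigation, with the GPA
 *     bits it overlaps preserved at a higher position by the final shift
 *     of SHADOW_NONPRESENT_OR_RSVD_MASK_LEN.
 */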

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
                        /*
                         * Some reserved pages, such as those from NVDIMM
                         * DAX devices, are not for MMIO and can be mapped
                         * with a cached memory type for better performance.
                         * However, the check above misidentifies those pages
                         * as MMIO, causing KVM to map them with the UC memory
                         * type, which hurts performance.  Therefore, also
                         * check the host memory type and treat only
                         * UC/UC-/WC pages as MMIO.
                         */
                        (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

        return !e820__mapped_raw_any(pfn_to_hpa(pfn),
                                     pfn_to_hpa(pfn + 1) - 1,
                                     E820_TYPE_RAM);
}

/*
 * Returns true if the SPTE has bits that may be set without holding mmu_lock.
 * The caller is responsible for checking if the SPTE is shadow-present, and
 * for determining whether or not the caller cares about non-leaf SPTEs.
 */
bool spte_has_volatile_bits(u64 spte)
{
        /*
         * Always update the SPTE atomically if it can be modified
         * outside of mmu_lock; this ensures the Dirty bit is not
         * lost and yields a stable is_writable_pte() so that a
         * required TLB flush is not missed.
         */
        if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
                return true;

        if (is_access_track_spte(spte))
                return true;

        if (spte_ad_enabled(spte)) {
                if (!(spte & shadow_accessed_mask) ||
                    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
                        return true;
        }

        return false;
}
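
/*
 * Editor's annotation (example, not part of the upstream file): the classic
 * "volatile" case is an MMU-writable but currently !writable SPTE, because
 * the fast page fault path may restore PT_WRITABLE_MASK without holding
 * mmu_lock, and hardware may set the Accessed/Dirty bits at any time when
 * A/D bits are in use.  Callers that can race with such updates are expected
 * to modify the SPTE with an atomic exchange so those changes aren't lost
 * (see mmu_spte_update() in mmu.c).
 */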

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
               const struct kvm_memory_slot *slot,
               unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
               u64 old_spte, bool prefetch, bool can_unsync,
               bool host_writable, u64 *new_spte)
{
        int level = sp->role.level;
        u64 spte = SPTE_MMU_PRESENT_MASK;
        bool wrprot = false;

        /*
         * For the EPT case, shadow_present_mask has no RWX bits set if
         * exec-only page table entries are supported.  In that case,
         * ACC_USER_MASK and shadow_user_mask are used to represent
         * read access.  See FNAME(gpte_access) in paging_tmpl.h.
         */
        WARN_ON_ONCE((pte_access | shadow_present_mask) == SHADOW_NONPRESENT_VALUE);

        if (sp->role.ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED;
        else if (kvm_mmu_page_ad_need_write_protect(sp))
                spte |= SPTE_TDP_AD_WRPROT_ONLY;

        spte |= shadow_present_mask;
        if (!prefetch)
                spte |= spte_shadow_accessed_mask(spte);

        /*
         * For simplicity, enforce the NX huge page mitigation even if not
         * strictly necessary.  KVM could ignore the mitigation if paging is
         * disabled in the guest, as the guest doesn't have any page tables to
         * abuse.  But to safely ignore the mitigation, KVM would have to
         * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG
         * is toggled on, and that's a net negative for performance when TDP is
         * enabled.  When TDP is disabled, KVM will always switch to a new MMU
         * when CR0.PG is toggled, but leveraging that to ignore the mitigation
         * would tie make_spte() further to vCPU/MMU state, and add complexity
         * just to optimize a mode that is anything but performance critical.
         */
        if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
            is_nx_huge_page_enabled(vcpu->kvm)) {
                pte_access &= ~ACC_EXEC_MASK;
        }

        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
                spte |= shadow_nx_mask;

        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;

        if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;

        if (shadow_memtype_mask)
                spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
                                                  kvm_is_mmio_pfn(pfn));
        if (host_writable)
                spte |= shadow_host_writable_mask;
        else
                pte_access &= ~ACC_WRITE_MASK;

        if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
                spte |= shadow_me_value;

        spte |= (u64)pfn << PAGE_SHIFT;

        if (pte_access & ACC_WRITE_MASK) {
                spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

                /*
                 * Optimization: for pte sync, if the SPTE was already writable
                 * the hash lookup is unnecessary (and expensive).  Write
                 * protection is the responsibility of kvm_mmu_get_page() /
                 * kvm_mmu_sync_roots().  The same reasoning applies to dirty
                 * page accounting.
                 */
                if (is_writable_pte(old_spte))
                        goto out;

                /*
                 * Unsync shadow pages that are reachable by the new, writable
                 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
                 * e.g. it's write-tracked (upper-level SPs) or has one or more
                 * shadow pages and unsync'ing pages is not allowed.
                 */
                if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
                        wrprot = true;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
                }
        }

        if (pte_access & ACC_WRITE_MASK)
                spte |= spte_shadow_dirty_mask(spte);

out:
        if (prefetch)
                spte = mark_spte_for_access_track(spte);

        WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
                  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
                  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

        if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
                /* Enforced by kvm_mmu_hugepage_adjust. */
                WARN_ON_ONCE(level > PG_LEVEL_4K);
                mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
        }

        *new_spte = spte;
        return wrprot;
}
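
/*
 * Editor's annotation (not part of the upstream file): the return value
 * indicates that the mapping was forcibly write-protected because a shadow
 * page reachable through it could not be unsync'd; callers such as
 * mmu_set_spte() use it to decide whether a write fault needs to be handled
 * by emulation instead of simply installing the new SPTE.  The constructed
 * SPTE is always returned via @new_spte, even when write-protected.
 */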

static u64 make_spte_executable(u64 spte)
{
        bool is_access_track = is_access_track_spte(spte);

        if (is_access_track)
                spte = restore_acc_track_spte(spte);

        spte &= ~shadow_nx_mask;
        spte |= shadow_x_mask;

        if (is_access_track)
                spte = mark_spte_for_access_track(spte);

        return spte;
}

/*
 * Construct an SPTE that maps a sub-page of the given huge page SPTE where
 * `index` identifies which sub-page.
 *
 * This is used during huge page splitting to build the SPTEs that make up the
 * new page table.
 */
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte,
                              union kvm_mmu_page_role role, int index)
{
        u64 child_spte = huge_spte;

        KVM_BUG_ON(!is_shadow_present_pte(huge_spte) || !is_large_pte(huge_spte), kvm);

        /*
         * The child_spte already has the base address of the huge page being
         * split. So we just have to OR in the offset to the page at the next
         * lower level for the given index.
         */
        child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;

        if (role.level == PG_LEVEL_4K) {
                child_spte &= ~PT_PAGE_SIZE_MASK;

                /*
                 * When splitting to a 4K page where execution is allowed, mark
                 * the page executable as the NX hugepage mitigation no longer
                 * applies.
                 */
                if ((role.access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(kvm))
                        child_spte = make_spte_executable(child_spte);
        }

        return child_spte;
}
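
/*
 * Editor's annotation (worked example, not part of the upstream file):
 * when a 2MiB SPTE is split, role.level is PG_LEVEL_4K, so
 * KVM_PAGES_PER_HPAGE(role.level) == 1 and child 'index' maps the frame at
 * offset index * 4KiB into the huge page.  When a 1GiB SPTE is split into
 * 2MiB SPTEs, KVM_PAGES_PER_HPAGE() == 512 and child 'index' maps offset
 * index * 2MiB, i.e. the computation above simply advances the PFN by one
 * child-sized region per index.
 */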


u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
        u64 spte = SPTE_MMU_PRESENT_MASK;

        spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
                shadow_user_mask | shadow_x_mask | shadow_me_value;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED;
        else
                spte |= shadow_accessed_mask;

        return spte;
}

u64 mark_spte_for_access_track(u64 spte)
{
        if (spte_ad_enabled(spte))
                return spte & ~shadow_accessed_mask;

        if (is_access_track_spte(spte))
                return spte;

        check_spte_writable_invariants(spte);

        WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                          SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
                  "Access Tracking saved bit locations are not zero\n");

        spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
                SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
        spte &= ~shadow_acc_track_mask;

        return spte;
}
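
/*
 * Editor's annotation (not part of the upstream file): access tracking is
 * only applied when A/D bits are unavailable (see the spte_ad_enabled()
 * early return above).  The permission bits covered by
 * SHADOW_ACC_TRACK_SAVED_BITS_MASK are stashed in the software-available
 * range selected by SHADOW_ACC_TRACK_SAVED_BITS_SHIFT, and the bits covered
 * by shadow_acc_track_mask are cleared so the next access faults; the fault
 * handler then puts the saved bits back via restore_acc_track_spte() (see
 * spte.h).  The W bit is deliberately not saved so that writes go through
 * the normal dirty-tracking path.
 */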

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

        /*
         * Reset to the original module param value to honor userspace's desire
         * to (dis)allow MMIO caching.  Update the param itself so that
         * userspace can see whether or not KVM is actually using MMIO caching.
         */
        enable_mmio_caching = allow_mmio_caching;
        if (!enable_mmio_caching)
                mmio_value = 0;

        /*
         * The mask must contain only bits that are carved out specifically for
         * the MMIO SPTE mask, e.g. to ensure there's no overlap with the MMIO
         * generation.
         */
        if (WARN_ON(mmio_mask & ~SPTE_MMIO_ALLOWED_MASK))
                mmio_value = 0;

        /*
         * Disable MMIO caching if the MMIO value collides with the bits that
         * are used to hold the relocated GFN when the L1TF mitigation is
         * enabled.  This should never fire as there is no known hardware that
         * can trigger this condition, e.g. SME/SEV CPUs that require a custom
         * MMIO value are not susceptible to L1TF.
         */
        if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;

        /*
         * The masked MMIO value must obviously match itself and a frozen SPTE
         * must not get a false positive.  Frozen SPTEs and MMIO SPTEs should
         * never collide as MMIO must set some RWX bits, and frozen SPTEs must
         * not set any RWX bits.
         */
        if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
            WARN_ON(mmio_value && (FROZEN_SPTE & mmio_mask) == mmio_value))
                mmio_value = 0;

        if (!mmio_value)
                enable_mmio_caching = false;

        shadow_mmio_value = mmio_value;
        shadow_mmio_mask  = mmio_mask;
        shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
        /* shadow_me_value must be a subset of shadow_me_mask */
        if (WARN_ON(me_value & ~me_mask))
                me_value = me_mask = 0;

        shadow_me_value = me_value;
        shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);

void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
        shadow_user_mask        = VMX_EPT_READABLE_MASK;
        shadow_accessed_mask    = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
        shadow_dirty_mask       = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
        shadow_nx_mask          = 0ull;
        shadow_x_mask           = VMX_EPT_EXECUTABLE_MASK;
        /* VMX_EPT_SUPPRESS_VE_BIT is needed for W or X violation. */
        shadow_present_mask     =
                (has_exec_only ? 0ull : VMX_EPT_READABLE_MASK) | VMX_EPT_SUPPRESS_VE_BIT;
        /*
         * EPT overrides the host MTRRs, and so KVM must program the desired
         * memtype directly into the SPTEs.  Note, this mask is just the mask
         * of all bits that factor into the memtype, the actual memtype must be
         * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
         */
        shadow_memtype_mask     = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
        shadow_acc_track_mask   = VMX_EPT_RWX_MASK;
        shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask  = EPT_SPTE_MMU_WRITABLE;

        /*
         * EPT Misconfigurations are generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
         */
        kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
                                   VMX_EPT_RWX_MASK | VMX_EPT_SUPPRESS_VE_BIT, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

void kvm_mmu_reset_all_pte_masks(void)
{
        u8 low_phys_bits;
        u64 mask;

        /*
         * If the CPU has 46 or fewer physical address bits, then set an
         * appropriate mask to guard against L1TF attacks. Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         *
         * Some Intel CPUs address the L1 cache using more PA bits than are
         * reported by CPUID. Use the PA width of the L1 cache when possible
         * to achieve more effective mitigation, e.g. if system RAM overlaps
         * the most significant bits of legal physical address space.
         */
        shadow_nonpresent_or_rsvd_mask = 0;
        low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_has_bug(X86_BUG_L1TF) &&
            !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
                          52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
                low_phys_bits = boot_cpu_data.x86_cache_bits
                        - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
        }
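
        /*
         * Editor's annotation (worked example, not part of the upstream
         * file), assuming SHADOW_NONPRESENT_OR_RSVD_MASK_LEN == 5: on a CPU
         * whose L1D is indexed with 46 physical address bits
         * (x86_cache_bits == 46), low_phys_bits becomes 41 and
         * shadow_nonpresent_or_rsvd_mask covers PA bits 45:41.  Setting
         * those bits in a non-present or MMIO SPTE makes the physical
         * address reference memory that cannot be present in the L1 data
         * cache, while the displaced GFN bits are preserved by shifting
         * them up by the same length (see make_mmio_spte()).
         */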

        shadow_nonpresent_or_rsvd_lower_gfn_mask =
                GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

        shadow_user_mask        = PT_USER_MASK;
        shadow_accessed_mask    = PT_ACCESSED_MASK;
        shadow_dirty_mask       = PT_DIRTY_MASK;
        shadow_nx_mask          = PT64_NX_MASK;
        shadow_x_mask           = 0;
        shadow_present_mask     = PT_PRESENT_MASK;

        /*
         * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
         * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
         * correct memtype (WB is the "weakest" memtype).
         */
        shadow_memtype_mask     = 0;
        shadow_acc_track_mask   = 0;
        shadow_me_mask          = 0;
        shadow_me_value         = 0;

        shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask  = DEFAULT_SPTE_MMU_WRITABLE;

        /*
         * Set a reserved PA bit in MMIO SPTEs to generate page faults with
         * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
         * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
         * 52-bit physical addresses then there are no reserved PA bits in the
         * PTEs and so the reserved PA approach must be disabled.
         */
        if (kvm_host.maxphyaddr < 52)
                mask = BIT_ULL(51) | PT_PRESENT_MASK;
        else
                mask = 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}

