Linux/arch/x86/kvm/mmu/tdp_mmu.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  3 
  4 #include "mmu.h"
  5 #include "mmu_internal.h"
  6 #include "mmutrace.h"
  7 #include "tdp_iter.h"
  8 #include "tdp_mmu.h"
  9 #include "spte.h"
 10 
 11 #include <asm/cmpxchg.h>
 12 #include <trace/events/kvm.h>
 13 
 14 /* Initializes the TDP MMU for the VM, if enabled. */
 15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 16 {
 17         INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
 18         spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
 19 }
 20 
 21 /* Arbitrarily returns true so that this may be used in if statements. */
 22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
 23                                                              bool shared)
 24 {
 25         if (shared)
 26                 lockdep_assert_held_read(&kvm->mmu_lock);
 27         else
 28                 lockdep_assert_held_write(&kvm->mmu_lock);
 29 
 30         return true;
 31 }
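
    /*
     * Editorial note: returning true unconditionally lets this assertion be
     * chained into boolean macro conditions, e.g. the
     * __for_each_tdp_mmu_root() iterators below.
     */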
 32 
 33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
 34 {
 35         /*
 36          * Invalidate all roots, which, besides the obvious, schedules all roots
 37          * for zapping and thus puts the TDP MMU's reference to each root, i.e.
 38          * ultimately frees all roots.
 39          */
 40         kvm_tdp_mmu_invalidate_all_roots(kvm);
 41         kvm_tdp_mmu_zap_invalidated_roots(kvm);
 42 
 43         WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
 44         WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 45 
 46         /*
 47          * Ensure that all the outstanding RCU callbacks to free shadow pages
 48          * can run before the VM is torn down.  Putting the last reference to
 49          * zapped roots will create new callbacks.
 50          */
 51         rcu_barrier();
 52 }
 53 
 54 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
 55 {
 56         free_page((unsigned long)sp->spt);
 57         kmem_cache_free(mmu_page_header_cache, sp);
 58 }
 59 
 60 /*
 61  * This is called through call_rcu in order to free TDP page table memory
 62  * safely with respect to other kernel threads that may be operating on
 63  * the memory.
 64  * Because TDP MMU page table memory is only accessed in RCU read-side
 65  * critical sections and is freed only after a grace period, lockless
 66  * walkers can never use the memory after it has been freed.
 67  */
 68 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 69 {
 70         struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
 71                                                rcu_head);
 72 
 73         tdp_mmu_free_sp(sp);
 74 }
 75 
 76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
 77 {
 78         if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
 79                 return;
 80 
 81         /*
 82          * The TDP MMU itself holds a reference to each root until the root is
 83          * explicitly invalidated, i.e. the final reference should never be
 84          * put for a valid root.
 85          */
 86         KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
 87 
 88         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
 89         list_del_rcu(&root->link);
 90         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
 91         call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 92 }
 93 
 94 /*
 95  * Returns the next root after @prev_root (or the first root if @prev_root is
 96  * NULL).  A reference to the returned root is acquired, and the reference to
 97  * @prev_root is released (the caller obviously must hold a reference to
 98  * @prev_root if it's non-NULL).
 99  *
100  * If @only_valid is true, invalid roots are skipped.
101  *
102  * Returns NULL if the end of tdp_mmu_roots was reached.
103  */
104 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
105                                               struct kvm_mmu_page *prev_root,
106                                               bool only_valid)
107 {
108         struct kvm_mmu_page *next_root;
109 
110         /*
111          * While the roots themselves are RCU-protected, fields such as
112          * role.invalid are protected by mmu_lock.
113          */
114         lockdep_assert_held(&kvm->mmu_lock);
115 
116         rcu_read_lock();
117 
118         if (prev_root)
119                 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
120                                                   &prev_root->link,
121                                                   typeof(*prev_root), link);
122         else
123                 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
124                                                    typeof(*next_root), link);
125 
126         while (next_root) {
127                 if ((!only_valid || !next_root->role.invalid) &&
128                     kvm_tdp_mmu_get_root(next_root))
129                         break;
130 
131                 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
132                                 &next_root->link, typeof(*next_root), link);
133         }
134 
135         rcu_read_unlock();
136 
137         if (prev_root)
138                 kvm_tdp_mmu_put_root(kvm, prev_root);
139 
140         return next_root;
141 }
142 
143 /*
144  * Note: this iterator gets and puts references to the roots it iterates over.
145  * This makes it safe to release the MMU lock and yield within the loop, but
146  * if exiting the loop early, the caller must drop the reference to the most
147  * recent root, unless keeping a live reference is desirable (see the
148  * usage sketch below).
149  *
150  * The iterator may run with mmu_lock held for either read or write.
151  */
152 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)    \
153         for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);                \
154              ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;              \
155              _root = tdp_mmu_next_root(_kvm, _root, _only_valid))               \
156                 if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {       \
157                 } else
158 
159 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)     \
160         __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
161 
162 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                   \
163         for (_root = tdp_mmu_next_root(_kvm, NULL, false);              \
164              ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;      \
165              _root = tdp_mmu_next_root(_kvm, _root, false))
166 
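    /*
     * Editorial usage sketch (not part of the kernel source): a caller that
     * exits one of the yield-safe iterators early must put the reference it
     * holds on the most recent root, e.g.:
     *
     *	for_each_tdp_mmu_root_yield_safe(kvm, root) {
     *		if (done_with_root(root)) {	// hypothetical predicate
     *			kvm_tdp_mmu_put_root(kvm, root);
     *			break;
     *		}
     *	}
     */
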
167 /*
168  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
169  * the implication being that any flow that holds mmu_lock for read is
170  * inherently yield-friendly and should use the yield-safe variant above.
171  * Holding mmu_lock for write obviates the need for RCU protection as the list
172  * is guaranteed to be stable.
173  */
174 #define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid)               \
175         list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)             \
176                 if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&            \
177                     ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||    \
178                      ((_only_valid) && (_root)->role.invalid))) {               \
179                 } else
180 
181 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)                      \
182         __for_each_tdp_mmu_root(_kvm, _root, _as_id, false)
183 
184 #define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id)                \
185         __for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
186 
187 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
188 {
189         struct kvm_mmu_page *sp;
190 
191         sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
192         sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
193 
194         return sp;
195 }
196 
197 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
198                             gfn_t gfn, union kvm_mmu_page_role role)
199 {
200         INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
201 
202         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
203 
204         sp->role = role;
205         sp->gfn = gfn;
206         sp->ptep = sptep;
207         sp->tdp_mmu_page = true;
208 
209         trace_kvm_mmu_get_page(sp, true);
210 }
211 
212 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
213                                   struct tdp_iter *iter)
214 {
215         struct kvm_mmu_page *parent_sp;
216         union kvm_mmu_page_role role;
217 
218         parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
219 
220         role = parent_sp->role;
221         role.level--;
222 
223         tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
224 }
225 
226 int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
227 {
228         struct kvm_mmu *mmu = vcpu->arch.mmu;
229         union kvm_mmu_page_role role = mmu->root_role;
230         int as_id = kvm_mmu_role_as_id(role);
231         struct kvm *kvm = vcpu->kvm;
232         struct kvm_mmu_page *root;
233 
234         /*
235          * Check for an existing root before acquiring the pages lock to avoid
236          * unnecessary serialization if multiple vCPUs are loading a new root.
237          * E.g. when bringing up secondary vCPUs, KVM will already have created
238          * a valid root on behalf of the primary vCPU.
239          */
240         read_lock(&kvm->mmu_lock);
241 
242         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
243                 if (root->role.word == role.word)
244                         goto out_read_unlock;
245         }
246 
247         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
248 
249         /*
250          * Recheck for an existing root after acquiring the pages lock, another
251          * vCPU may have raced ahead and created a new usable root.  Manually
252          * walk the list of roots as the standard macros assume that the pages
253          * lock is *not* held.  WARN if grabbing a reference to a usable root
254          * fails, as the last reference to a root can only be put *after* the
255          * root has been invalidated, which requires holding mmu_lock for write.
256          */
257         list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
258                 if (root->role.word == role.word &&
259                     !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
260                         goto out_spin_unlock;
261         }
262 
263         root = tdp_mmu_alloc_sp(vcpu);
264         tdp_mmu_init_sp(root, NULL, 0, role);
265 
266         /*
267          * TDP MMU roots are kept until they are explicitly invalidated, either
268          * by a memslot update or by the destruction of the VM.  Initialize the
269          * refcount to two; one reference for the vCPU, and one reference for
270          * the TDP MMU itself, which is held until the root is invalidated and
271          * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
272          */
273         refcount_set(&root->tdp_mmu_root_count, 2);
274         list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
275 
276 out_spin_unlock:
277         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
278 out_read_unlock:
279         read_unlock(&kvm->mmu_lock);
280         /*
281          * Note, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS will prevent entering the guest
282          * and actually consuming the root if it's invalidated after dropping
283          * mmu_lock, and the root can't be freed as this vCPU holds a reference.
284          */
285         mmu->root.hpa = __pa(root->spt);
286         mmu->root.pgd = 0;
287         return 0;
288 }
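
    /*
     * Editorial summary of the root lifecycle implemented above (a sketch,
     * not authoritative documentation):
     *
     *   refcount == 2                       : one reference for the vCPU,
     *                                         one for the TDP MMU itself
     *   kvm_tdp_mmu_invalidate_all_roots()  : sets role.invalid
     *   kvm_tdp_mmu_zap_invalidated_roots() : zaps the root and puts the
     *                                         TDP MMU's reference
     *   final kvm_tdp_mmu_put_root()        : list_del_rcu() plus an RCU-
     *                                         deferred free of the root
     */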
289 
290 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
291                                 u64 old_spte, u64 new_spte, int level,
292                                 bool shared);
293 
294 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
295 {
296         kvm_account_pgtable_pages((void *)sp->spt, +1);
297         atomic64_inc(&kvm->arch.tdp_mmu_pages);
298 }
299 
300 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
301 {
302         kvm_account_pgtable_pages((void *)sp->spt, -1);
303         atomic64_dec(&kvm->arch.tdp_mmu_pages);
304 }
305 
306 /**
307  * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
308  *
309  * @kvm: kvm instance
310  * @sp: the page to be removed
311  */
312 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
313 {
314         tdp_unaccount_mmu_page(kvm, sp);
315 
316         if (!sp->nx_huge_page_disallowed)
317                 return;
318 
319         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
320         sp->nx_huge_page_disallowed = false;
321         untrack_possible_nx_huge_page(kvm, sp);
322         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
323 }
324 
325 /**
326  * handle_removed_pt() - handle a page table removed from the TDP structure
327  *
328  * @kvm: kvm instance
329  * @pt: the page removed from the paging structure
330  * @shared: This operation may not be running under the exclusive use
331  *          of the MMU lock and the operation must synchronize with other
332  *          threads that might be modifying SPTEs.
333  *
334  * Given a page table that has been removed from the TDP paging structure,
335  * iterates through the page table to clear SPTEs and free child page tables.
336  *
337  * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
338  * protection. Since this thread removed it from the paging structure,
339  * this thread will be responsible for ensuring the page is freed. Hence the
340  * early rcu_dereferences in the function.
341  */
342 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
343 {
344         struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
345         int level = sp->role.level;
346         gfn_t base_gfn = sp->gfn;
347         int i;
348 
349         trace_kvm_mmu_prepare_zap_page(sp);
350 
351         tdp_mmu_unlink_sp(kvm, sp);
352 
353         for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
354                 tdp_ptep_t sptep = pt + i;
355                 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
356                 u64 old_spte;
357 
358                 if (shared) {
359                         /*
360                          * Set the SPTE to a nonpresent value that other
361                          * threads will not overwrite. If the SPTE was
362                          * already marked as frozen then another thread
363                          * handling a page fault could overwrite it, so
364                          * retry the write until it observes the SPTE go
365                          * from some non-frozen value to the frozen value.
366                          */
367                         for (;;) {
368                                 old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
369                                 if (!is_frozen_spte(old_spte))
370                                         break;
371                                 cpu_relax();
372                         }
373                 } else {
374                         /*
375                          * If the SPTE is not MMU-present, there is no backing
376                          * page associated with the SPTE and so no side effects
377                          * that need to be recorded, and exclusive ownership of
378                          * mmu_lock ensures the SPTE can't be made present.
379                          * Note, zapping MMIO SPTEs is also unnecessary as they
380                          * are guarded by the memslots generation, not by being
381                          * unreachable.
382                          */
383                         old_spte = kvm_tdp_mmu_read_spte(sptep);
384                         if (!is_shadow_present_pte(old_spte))
385                                 continue;
386 
387                         /*
388                          * Use the common helper instead of a raw WRITE_ONCE as
389                          * the SPTE needs to be updated atomically if it can be
390                          * modified by a different vCPU outside of mmu_lock.
391                          * Even though the parent SPTE is !PRESENT, the TLB
392                          * hasn't yet been flushed, and both Intel and AMD
393                          * document that A/D assists can use upper-level PxE
394                          * entries that are cached in the TLB, i.e. the CPU can
395                          * still access the page and mark it dirty.
396                          *
397                          * No retry is needed in the atomic update path as the
398                          * sole concern is dropping a Dirty bit, i.e. no other
399                          * task can zap/remove the SPTE as mmu_lock is held for
400                          * write.  Marking the SPTE as a frozen SPTE is not
401                          * strictly necessary for the same reason, but using
402                          * the frozen SPTE value keeps the shared/exclusive
403                          * paths consistent and allows the handle_changed_spte()
404                          * call below to hardcode the new value to FROZEN_SPTE.
405                          *
406                          * Note, even though dropping a Dirty bit is the only
407                          * scenario where a non-atomic update could result in a
408                          * functional bug, simply checking the Dirty bit isn't
409                          * sufficient as a fast page fault could read the upper
410                          * level SPTE before it is zapped, and then make this
411                          * target SPTE writable, resume the guest, and set the
412                          * Dirty bit between reading the SPTE above and writing
413                          * it here.
414                          */
415                         old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
416                                                           FROZEN_SPTE, level);
417                 }
418                 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
419                                     old_spte, FROZEN_SPTE, level, shared);
420         }
421 
422         call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
423 }
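
    /*
     * Editorial note (a sketch of the protocol, not authoritative): the
     * FROZEN_SPTE handoff above works because concurrent page-fault handlers
     * back off when they observe a frozen SPTE (see the is_frozen_spte()
     * check in kvm_tdp_mmu_map()), so only the zapping thread transitions
     * an SPTE away from the frozen value.
     */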
424 
425 /**
426  * handle_changed_spte - handle bookkeeping associated with an SPTE change
427  * @kvm: kvm instance
428  * @as_id: the address space of the paging structure the SPTE was a part of
429  * @gfn: the base GFN that was mapped by the SPTE
430  * @old_spte: The value of the SPTE before the change
431  * @new_spte: The value of the SPTE after the change
432  * @level: the level of the PT the SPTE is part of in the paging structure
433  * @shared: This operation may not be running under the exclusive use of
434  *          the MMU lock and the operation must synchronize with other
435  *          threads that might be modifying SPTEs.
436  *
437  * Handle bookkeeping that might result from the modification of a SPTE.  Note,
438  * dirty logging updates are handled in common code, not here (see make_spte()
439  * and fast_pf_fix_direct_spte()).
440  */
441 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
442                                 u64 old_spte, u64 new_spte, int level,
443                                 bool shared)
444 {
445         bool was_present = is_shadow_present_pte(old_spte);
446         bool is_present = is_shadow_present_pte(new_spte);
447         bool was_leaf = was_present && is_last_spte(old_spte, level);
448         bool is_leaf = is_present && is_last_spte(new_spte, level);
449         bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
450 
451         WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
452         WARN_ON_ONCE(level < PG_LEVEL_4K);
453         WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
454 
455         /*
456          * If this warning were to trigger it would indicate that there was a
457          * missing MMU notifier or a race with some notifier handler.
458          * A present, leaf SPTE should never be directly replaced with another
459          * present leaf SPTE pointing to a different PFN. A notifier handler
460          * should be zapping the SPTE before the main MM's page table is
461          * changed, or the SPTE should be zeroed, and the TLBs flushed by the
462          * thread before replacement.
463          */
464         if (was_leaf && is_leaf && pfn_changed) {
465                 pr_err("Invalid SPTE change: cannot replace a present leaf\n"
466                        "SPTE with another present leaf SPTE mapping a\n"
467                        "different PFN!\n"
468                        "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
469                        as_id, gfn, old_spte, new_spte, level);
470 
471                 /*
472                  * Crash the host to prevent error propagation and guest data
473                  * corruption.
474                  */
475                 BUG();
476         }
477 
478         if (old_spte == new_spte)
479                 return;
480 
481         trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
482 
483         if (is_leaf)
484                 check_spte_writable_invariants(new_spte);
485 
486         /*
487          * The only time a SPTE should be changed from one non-present
488          * state to another is when an MMIO entry is installed/modified/
489          * removed. In that case, there is nothing to do here.
490          */
491         if (!was_present && !is_present) {
492                 /*
493                  * If this change does not involve a MMIO SPTE or frozen SPTE,
494                  * it is unexpected. Log the change, though it should not
495                  * impact the guest since both the former and current SPTEs
496                  * are nonpresent.
497                  */
498                 if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
499                                  !is_mmio_spte(kvm, new_spte) &&
500                                  !is_frozen_spte(new_spte)))
501                         pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
502                                "should not be replaced with another,\n"
503                                "different nonpresent SPTE, unless one or both\n"
504                                "are MMIO SPTEs, or the new SPTE is\n"
505                                "a temporary frozen SPTE.\n"
506                                "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
507                                as_id, gfn, old_spte, new_spte, level);
508                 return;
509         }
510 
511         if (is_leaf != was_leaf)
512                 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
513 
514         if (was_leaf && is_dirty_spte(old_spte) &&
515             (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
516                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
517 
518         /*
519          * Recursively handle child PTs if the change removed a subtree from
520          * the paging structure.  Note the WARN on the PFN changing without the
521          * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
522          * pages are kernel allocations and should never be migrated.
523          */
524         if (was_present && !was_leaf &&
525             (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
526                 handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
527 
528         if (was_leaf && is_accessed_spte(old_spte) &&
529             (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
530                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
531 }
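
    /*
     * Editorial summary (sketch) of the transitions handled above:
     *
     *   !present -> !present        : MMIO/frozen SPTE update, no work
     *   leaf     -> !present        : page stats--, Dirty/Accessed bits
     *                                 propagated to the old PFN
     *   non-leaf -> leaf/!present   : recurse via handle_removed_pt()
     *   leaf     -> leaf (same PFN) : A/D bit bookkeeping only
     */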
532 
533 static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
534                                                          u64 new_spte)
535 {
536         u64 *sptep = rcu_dereference(iter->sptep);
537 
538         /*
539          * The caller is responsible for ensuring the old SPTE is not a FROZEN
540          * SPTE.  KVM should never attempt to zap or manipulate a FROZEN SPTE,
541          * and pre-checking before inserting a new SPTE is advantageous as it
542          * avoids unnecessary work.
543          */
544         WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));
545 
546         /*
547          * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
548          * does not hold the mmu_lock.  On failure, i.e. if a different logical
549          * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
550          * the current value, so the caller operates on fresh data, e.g. if it
551  * retries tdp_mmu_set_spte_atomic().
552          */
553         if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
554                 return -EBUSY;
555 
556         return 0;
557 }
558 
559 /*
560  * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
561  * and handle the associated bookkeeping.  Do not mark the page dirty
562  * in KVM's dirty bitmaps.
563  *
564  * If setting the SPTE fails because it has changed, iter->old_spte will be
565  * refreshed to the current value of the spte.
566  *
567  * @kvm: kvm instance
568  * @iter: a tdp_iter instance currently on the SPTE that should be set
569  * @new_spte: The value the SPTE should be set to
570  * Return:
571  * * 0      - If the SPTE was set.
572  * * -EBUSY - If the SPTE cannot be set. In this case this function will have
573  *            no side-effects other than setting iter->old_spte to the last
574  *            known value of the spte.
575  */
576 static inline int __must_check tdp_mmu_set_spte_atomic(struct kvm *kvm,
577                                                        struct tdp_iter *iter,
578                                                        u64 new_spte)
579 {
580         int ret;
581 
582         lockdep_assert_held_read(&kvm->mmu_lock);
583 
584         ret = __tdp_mmu_set_spte_atomic(iter, new_spte);
585         if (ret)
586                 return ret;
587 
588         handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
589                             new_spte, iter->level, true);
590 
591         return 0;
592 }
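
    /*
     * Editorial usage sketch: on -EBUSY, iter->old_spte has already been
     * refreshed by the failed try_cmpxchg64(), so a caller can recompute
     * the new value and retry (compute_new_spte() is a hypothetical
     * helper):
     *
     *	do {
     *		new_spte = compute_new_spte(iter->old_spte);
     *	} while (tdp_mmu_set_spte_atomic(kvm, iter, new_spte));
     */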
593 
594 static inline int __must_check tdp_mmu_zap_spte_atomic(struct kvm *kvm,
595                                                        struct tdp_iter *iter)
596 {
597         int ret;
598 
599         lockdep_assert_held_read(&kvm->mmu_lock);
600 
601         /*
602          * Freeze the SPTE by setting it to a special, non-present value. This
603          * will stop other threads from immediately installing a present entry
604          * in its place before the TLBs are flushed.
605          *
606          * Delay processing of the zapped SPTE until after TLBs are flushed and
607          * the FROZEN_SPTE is replaced (see below).
608          */
609         ret = __tdp_mmu_set_spte_atomic(iter, FROZEN_SPTE);
610         if (ret)
611                 return ret;
612 
613         kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
614 
615         /*
616          * No other thread can overwrite the frozen SPTE as they must either
617          * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
618          * overwrite the special frozen SPTE value. Use the raw write helper to
619          * avoid an unnecessary check on volatile bits.
620          */
621         __kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
622 
623         /*
624          * Process the zapped SPTE after flushing TLBs, and after replacing
625          * FROZEN_SPTE with 0. This minimizes the amount of time vCPUs are
626          * blocked by the FROZEN_SPTE and reduces contention on the child
627          * SPTEs.
628          */
629         handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
630                             SHADOW_NONPRESENT_VALUE, iter->level, true);
631 
632         return 0;
633 }
634 
635 
636 /*
637  * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
638  * @kvm:              KVM instance
639  * @as_id:            Address space ID, i.e. regular vs. SMM
640  * @sptep:            Pointer to the SPTE
641  * @old_spte:         The current value of the SPTE
642  * @new_spte:         The new value that will be set for the SPTE
643  * @gfn:              The base GFN that was (or will be) mapped by the SPTE
644  * @level:            The level _containing_ the SPTE (its parent PT's level)
645  *
646  * Returns the old SPTE value, which _may_ be different than @old_spte if the
647  * SPTE had volatile bits.
648  */
649 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
650                             u64 old_spte, u64 new_spte, gfn_t gfn, int level)
651 {
652         lockdep_assert_held_write(&kvm->mmu_lock);
653 
654         /*
655          * No thread should be using this function to set SPTEs to or from the
656          * temporary frozen SPTE value.
657          * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
658          * should be used. If operating under the MMU lock in write mode, the
659          * use of the frozen SPTE should not be necessary.
660          */
661         WARN_ON_ONCE(is_frozen_spte(old_spte) || is_frozen_spte(new_spte));
662 
663         old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
664 
665         handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
666         return old_spte;
667 }
668 
669 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
670                                          u64 new_spte)
671 {
672         WARN_ON_ONCE(iter->yielded);
673         iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
674                                           iter->old_spte, new_spte,
675                                           iter->gfn, iter->level);
676 }
677 
678 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
679         for_each_tdp_pte(_iter, _root, _start, _end)
680 
681 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)  \
682         tdp_root_for_each_pte(_iter, _root, _start, _end)               \
683                 if (!is_shadow_present_pte(_iter.old_spte) ||           \
684                     !is_last_spte(_iter.old_spte, _iter.level))         \
685                         continue;                                       \
686                 else
687 
688 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)         \
689         for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
690 
691 /*
692  * Yield if the MMU lock is contended or this thread needs to return control
693  * to the scheduler.
694  *
695  * If this function should yield and flush is set, it will perform a remote
696  * TLB flush before yielding.
697  *
698  * If this function yields, iter->yielded is set and the caller must skip to
699  * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
700  * over the paging structures to allow the iterator to continue its traversal
701  * from the paging structure root.
702  *
703  * Returns true if this function yielded.
704  */
705 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
706                                                           struct tdp_iter *iter,
707                                                           bool flush, bool shared)
708 {
709         WARN_ON_ONCE(iter->yielded);
710 
711         /* Ensure forward progress has been made before yielding. */
712         if (iter->next_last_level_gfn == iter->yielded_gfn)
713                 return false;
714 
715         if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
716                 if (flush)
717                         kvm_flush_remote_tlbs(kvm);
718 
719                 rcu_read_unlock();
720 
721                 if (shared)
722                         cond_resched_rwlock_read(&kvm->mmu_lock);
723                 else
724                         cond_resched_rwlock_write(&kvm->mmu_lock);
725 
726                 rcu_read_lock();
727 
728                 WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
729 
730                 iter->yielded = true;
731         }
732 
733         return iter->yielded;
734 }
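
    /*
     * Editorial usage sketch, mirroring the real callers below: walkers
     * invoke this at the top of each loop iteration and skip the current
     * step when it yields:
     *
     *	for_each_tdp_pte_min_level(iter, root, level, start, end) {
     *		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared))
     *			continue;
     *		...
     *	}
     */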
735 
736 static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
737 {
738         /*
739          * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
740          * a gpa range that would exceed the max gfn, and KVM does not create
741          * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
742          * the slow emulation path every time.
743          */
744         return kvm_mmu_max_gfn() + 1;
745 }
746 
747 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
748                                bool shared, int zap_level)
749 {
750         struct tdp_iter iter;
751 
752         gfn_t end = tdp_mmu_max_gfn_exclusive();
753         gfn_t start = 0;
754 
755         for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
756 retry:
757                 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
758                         continue;
759 
760                 if (!is_shadow_present_pte(iter.old_spte))
761                         continue;
762 
763                 if (iter.level > zap_level)
764                         continue;
765 
766                 if (!shared)
767                         tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
768                 else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
769                         goto retry;
770         }
771 }
772 
773 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
774                              bool shared)
775 {
776 
777         /*
778          * The root must have an elevated refcount so that it's reachable via
779          * mmu_notifier callbacks, which allows this path to yield and drop
780          * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
781          * must drop all references to relevant pages prior to completing the
782          * callback.  Dropping mmu_lock with an unreachable root would result
783          * in zapping SPTEs after a relevant mmu_notifier callback completes
784          * and lead to use-after-free as zapping a SPTE triggers "writeback" of
785          * dirty accessed bits to the SPTE's associated struct page.
786          */
787         WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
788 
789         kvm_lockdep_assert_mmu_lock_held(kvm, shared);
790 
791         rcu_read_lock();
792 
793         /*
794          * Zap roots in multiple passes of decreasing granularity, i.e. zap at
795          * 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all
796          * preempt models) or mmu_lock contention (full or real-time models).
797          * Zapping at finer granularity marginally increases the total time of
798          * the zap, but in most cases the zap itself isn't latency sensitive.
799          *
800          * If KVM is configured to prove the MMU, skip the 4KiB and 2MiB zaps
801          * in order to mimic the page fault path, which can replace a 1GiB page
802          * table with an equivalent 1GiB hugepage, i.e. can get saddled with
803          * zapping a 1GiB region that's fully populated with 4KiB SPTEs.  This
804          * allows verifying that KVM can safely zap 1GiB regions, e.g. without
805          * inducing RCU stalls, without relying on a relatively rare event
806          * (zapping roots is orders of magnitude more common).  Note, because
807          * zapping a SP recurses on its children, stepping down to PG_LEVEL_4K
808          * in the iterator itself is unnecessary.
809          */
810         if (!IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {
811                 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);
812                 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);
813         }
814         __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
815         __tdp_mmu_zap_root(kvm, root, shared, root->role.level);
816 
817         rcu_read_unlock();
818 }
819 
820 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
821 {
822         u64 old_spte;
823 
824         /*
825          * This helper intentionally doesn't allow zapping a root shadow page,
826          * which doesn't have a parent page table and thus no associated entry.
827          */
828         if (WARN_ON_ONCE(!sp->ptep))
829                 return false;
830 
831         old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
832         if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
833                 return false;
834 
835         tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
836                          SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);
837 
838         return true;
839 }
840 
841 /*
842  * If can_yield is true, this function will release the MMU lock and reschedule
843  * if the scheduler needs the CPU or there is contention on the MMU lock. If this
844  * function cannot yield, it will not release the MMU lock or reschedule and
845  * the caller must ensure it does not supply too large a GFN range, or the
846  * operation can cause a soft lockup.
847  */
848 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
849                               gfn_t start, gfn_t end, bool can_yield, bool flush)
850 {
851         struct tdp_iter iter;
852 
853         end = min(end, tdp_mmu_max_gfn_exclusive());
854 
855         lockdep_assert_held_write(&kvm->mmu_lock);
856 
857         rcu_read_lock();
858 
859         for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
860                 if (can_yield &&
861                     tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
862                         flush = false;
863                         continue;
864                 }
865 
866                 if (!is_shadow_present_pte(iter.old_spte) ||
867                     !is_last_spte(iter.old_spte, iter.level))
868                         continue;
869 
870                 tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
871 
872                 /*
873                  * Zapping SPTEs in invalid roots doesn't require a TLB flush,
874                  * see kvm_tdp_mmu_zap_invalidated_roots() for details.
875                  */
876                 if (!root->role.invalid)
877                         flush = true;
878         }
879 
880         rcu_read_unlock();
881 
882         /*
883          * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
884          * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
885          */
886         return flush;
887 }
888 
889 /*
890  * Zap leaf SPTEs for the range of gfns, [start, end), for all *VALID* roots.
891  * Returns true if a TLB flush is needed before releasing the MMU lock, i.e. if
892  * one or more SPTEs were zapped since the MMU lock was last acquired.
893  */
894 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
895 {
896         struct kvm_mmu_page *root;
897 
898         lockdep_assert_held_write(&kvm->mmu_lock);
899         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1)
900                 flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
901 
902         return flush;
903 }
904 
905 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
906 {
907         struct kvm_mmu_page *root;
908 
909         /*
910          * Zap all roots, including invalid roots, as all SPTEs must be dropped
911          * before returning to the caller.  Zap directly even if the root is
912          * also being zapped by a worker.  Walking zapped top-level SPTEs isn't
913          * all that expensive and mmu_lock is already held, which means the
914          * worker has yielded, i.e. flushing the work instead of zapping here
915          * isn't guaranteed to be any faster.
916          *
917          * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
918          * is being destroyed or the userspace VMM has exited.  In both cases,
919          * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
920          */
921         lockdep_assert_held_write(&kvm->mmu_lock);
922         for_each_tdp_mmu_root_yield_safe(kvm, root)
923                 tdp_mmu_zap_root(kvm, root, false);
924 }
925 
926 /*
927  * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
928  * zap" completes.
929  */
930 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
931 {
932         struct kvm_mmu_page *root;
933 
934         read_lock(&kvm->mmu_lock);
935 
936         for_each_tdp_mmu_root_yield_safe(kvm, root) {
937                 if (!root->tdp_mmu_scheduled_root_to_zap)
938                         continue;
939 
940                 root->tdp_mmu_scheduled_root_to_zap = false;
941                 KVM_BUG_ON(!root->role.invalid, kvm);
942 
943                 /*
944                  * A TLB flush is not necessary as KVM performs a local TLB
945                  * flush when allocating a new root (see kvm_mmu_load()), and
946                  * when migrating a vCPU to a different pCPU.  Note, the local
947                  * TLB flush on reuse also invalidates paging-structure-cache
948                  * entries, i.e. TLB entries for intermediate paging structures,
949                  * that may be zapped, as such entries are associated with the
950                  * ASID on both VMX and SVM.
951                  */
952                 tdp_mmu_zap_root(kvm, root, true);
953 
954                 /*
955                  * The reference needs to be put *after* zapping the root, as
956                  * the root must be reachable by mmu_notifiers while it's being
957                  * zapped.
958                  */
959                 kvm_tdp_mmu_put_root(kvm, root);
960         }
961 
962         read_unlock(&kvm->mmu_lock);
963 }
964 
965 /*
966  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
967  * is about to be zapped, e.g. in response to a memslots update.  The actual
968  * zapping is done separately so that it happens with mmu_lock held for read,
969  * whereas invalidating roots must be done with mmu_lock held for write (unless
970  * the VM is being destroyed).
971  *
972  * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
973  * See kvm_tdp_mmu_alloc_root().
974  */
975 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
976 {
977         struct kvm_mmu_page *root;
978 
979         /*
980          * mmu_lock must be held for write to ensure that a root doesn't become
981          * invalid while there are active readers (invalidating a root while
982          * there are active readers may or may not be problematic in practice,
983          * but it's uncharted territory and not supported).
984          *
985          * Waive the assertion if there are no users of @kvm, i.e. the VM is
986          * being destroyed after all references have been put, or if no vCPUs
987          * have been created (which means there are no roots), i.e. the VM is
988          * being destroyed in an error path of KVM_CREATE_VM.
989          */
990         if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
991             refcount_read(&kvm->users_count) && kvm->created_vcpus)
992                 lockdep_assert_held_write(&kvm->mmu_lock);
993 
994         /*
995          * As above, mmu_lock isn't held when destroying the VM!  There can't
996          * be other references to @kvm, i.e. nothing else can invalidate roots
997          * or get/put references to roots.
998          */
999         list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1000                 /*
1001                  * Note, invalid roots can outlive a memslot update!  Invalid
1002                  * roots must be *zapped* before the memslot update completes,
1003                  * but a different task can acquire a reference and keep the
1004                  * root alive after it's been zapped.
1005                  */
1006                 if (!root->role.invalid) {
1007                         root->tdp_mmu_scheduled_root_to_zap = true;
1008                         root->role.invalid = true;
1009                 }
1010         }
1011 }
1012 
1013 /*
1014  * Installs a last-level SPTE to handle a TDP page fault.
1015  * (NPT/EPT violation/misconfiguration)
1016  */
1017 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
1018                                           struct kvm_page_fault *fault,
1019                                           struct tdp_iter *iter)
1020 {
1021         struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
1022         u64 new_spte;
1023         int ret = RET_PF_FIXED;
1024         bool wrprot = false;
1025 
1026         if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
1027                 return RET_PF_RETRY;
1028 
1029         if (unlikely(!fault->slot))
1030                 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
1031         else
1032                 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
1033                                          fault->pfn, iter->old_spte, fault->prefetch, true,
1034                                          fault->map_writable, &new_spte);
1035 
1036         if (new_spte == iter->old_spte)
1037                 ret = RET_PF_SPURIOUS;
1038         else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
1039                 return RET_PF_RETRY;
1040         else if (is_shadow_present_pte(iter->old_spte) &&
1041                  !is_last_spte(iter->old_spte, iter->level))
1042                 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1043 
1044         /*
1045          * If the page fault was caused by a write but the page is write
1046          * protected, emulation is needed. If the emulation was skipped,
1047          * the vCPU would have the same fault again.
1048          */
1049         if (wrprot) {
1050                 if (fault->write)
1051                         ret = RET_PF_EMULATE;
1052         }
1053 
1054         /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
1055         if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
1056                 vcpu->stat.pf_mmio_spte_created++;
1057                 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
1058                                      new_spte);
1059                 ret = RET_PF_EMULATE;
1060         } else {
1061                 trace_kvm_mmu_set_spte(iter->level, iter->gfn,
1062                                        rcu_dereference(iter->sptep));
1063         }
1064 
1065         return ret;
1066 }
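
     /*
      * Editorial summary (sketch) of the RET_PF_* codes used above:
      *
      *   RET_PF_FIXED    - a new SPTE was installed; the fault is resolved
      *   RET_PF_SPURIOUS - the SPTE already had the desired value
      *   RET_PF_RETRY    - lost a race updating the SPTE; let the vCPU
      *                     re-fault
      *   RET_PF_EMULATE  - MMIO, or a write to a write-protected page;
      *                     the access must be emulated
      */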
1067 
1068 /*
1069  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1070  * provided page table.
1071  *
1072  * @kvm: kvm instance
1073  * @iter: a tdp_iter instance currently on the SPTE that should be set
1074  * @sp: The new TDP page table to install.
1075  * @shared: This operation is running under the MMU lock in read mode.
1076  *
1077  * Returns: 0 if the new page table was installed. Non-0 if the page table
1078  *          could not be installed (e.g. the atomic compare-exchange failed).
1079  */
1080 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1081                            struct kvm_mmu_page *sp, bool shared)
1082 {
1083         u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1084         int ret = 0;
1085 
1086         if (shared) {
1087                 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1088                 if (ret)
1089                         return ret;
1090         } else {
1091                 tdp_mmu_iter_set_spte(kvm, iter, spte);
1092         }
1093 
1094         tdp_account_mmu_page(kvm, sp);
1095 
1096         return 0;
1097 }
1098 
1099 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1100                                    struct kvm_mmu_page *sp, bool shared);
1101 
1102 /*
1103  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1104  * page tables and SPTEs to translate the faulting guest physical address.
1105  */
1106 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1107 {
1108         struct kvm_mmu *mmu = vcpu->arch.mmu;
1109         struct kvm *kvm = vcpu->kvm;
1110         struct tdp_iter iter;
1111         struct kvm_mmu_page *sp;
1112         int ret = RET_PF_RETRY;
1113 
1114         kvm_mmu_hugepage_adjust(vcpu, fault);
1115 
1116         trace_kvm_mmu_spte_requested(fault);
1117 
1118         rcu_read_lock();
1119 
1120         tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1121                 int r;
1122 
1123                 if (fault->nx_huge_page_workaround_enabled)
1124                         disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1125 
1126                 /*
1127                  * If the SPTE has been frozen by another thread, just give up and
1128                  * retry, avoiding unnecessary page table allocation and free.
1129                  */
1130                 if (is_frozen_spte(iter.old_spte))
1131                         goto retry;
1132 
1133                 if (iter.level == fault->goal_level)
1134                         goto map_target_level;
1135 
1136                 /* Step down into the lower level page table if it exists. */
1137                 if (is_shadow_present_pte(iter.old_spte) &&
1138                     !is_large_pte(iter.old_spte))
1139                         continue;
1140 
1141                 /*
1142                  * The SPTE is either non-present or points to a huge page that
1143                  * needs to be split.
1144                  */
1145                 sp = tdp_mmu_alloc_sp(vcpu);
1146                 tdp_mmu_init_child_sp(sp, &iter);
1147 
1148                 sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1149 
1150                 if (is_shadow_present_pte(iter.old_spte))
1151                         r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1152                 else
1153                         r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1154 
1155                 /*
1156                  * Force the guest to retry if installing an upper level SPTE
1157                  * failed, e.g. because a different task modified the SPTE.
1158                  */
1159                 if (r) {
1160                         tdp_mmu_free_sp(sp);
1161                         goto retry;
1162                 }
1163 
1164                 if (fault->huge_page_disallowed &&
1165                     fault->req_level >= iter.level) {
1166                         spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1167                         if (sp->nx_huge_page_disallowed)
1168                                 track_possible_nx_huge_page(kvm, sp);
1169                         spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1170                 }
1171         }
1172 
1173         /*
1174          * The walk aborted before reaching the target level, e.g. because the
1175          * iterator detected an upper level SPTE was frozen during traversal.
1176          */
1177         WARN_ON_ONCE(iter.level == fault->goal_level);
1178         goto retry;
1179 
1180 map_target_level:
1181         ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1182 
1183 retry:
1184         rcu_read_unlock();
1185         return ret;
1186 }
1187 
1188 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1189                                  bool flush)
1190 {
1191         struct kvm_mmu_page *root;
1192 
1193         __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
1194                 flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1195                                           range->may_block, flush);
1196 
1197         return flush;
1198 }
1199 
1200 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1201                               struct kvm_gfn_range *range);
1202 
1203 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1204                                                    struct kvm_gfn_range *range,
1205                                                    tdp_handler_t handler)
1206 {
1207         struct kvm_mmu_page *root;
1208         struct tdp_iter iter;
1209         bool ret = false;
1210 
1211         /*
1212          * Don't support rescheduling, none of the MMU notifiers that funnel
1213          * into this helper allow blocking; it'd be dead, wasteful code.
1214          */
1215         for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1216                 rcu_read_lock();
1217 
1218                 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1219                         ret |= handler(kvm, &iter, range);
1220 
1221                 rcu_read_unlock();
1222         }
1223 
1224         return ret;
1225 }
1226 
1227 /*
1228  * Mark the SPTEs in the range of GFNs, [start, end), unaccessed and return
1229  * non-zero if any of the GFNs in the range have been accessed.
1230  *
1231  * No need to mark the corresponding PFN as accessed as this call is coming
1232  * from the clear_young() or clear_flush_young() notifier, which uses the
1233  * return value to determine if the page has been accessed.
1234  */
1235 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1236                           struct kvm_gfn_range *range)
1237 {
1238         u64 new_spte;
1239 
1240         /* If we have a non-accessed entry we don't need to change the pte. */
1241         if (!is_accessed_spte(iter->old_spte))
1242                 return false;
1243 
1244         if (spte_ad_enabled(iter->old_spte)) {
1245                 iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
1246                                                          iter->old_spte,
1247                                                          shadow_accessed_mask,
1248                                                          iter->level);
1249                 new_spte = iter->old_spte & ~shadow_accessed_mask;
1250         } else {
1251                 /*
1252                  * Capture the dirty status of the page, so that it doesn't get
1253                  * lost when the SPTE is marked for access tracking.
1254                  */
1255                 if (is_writable_pte(iter->old_spte))
1256                         kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
1257 
1258                 new_spte = mark_spte_for_access_track(iter->old_spte);
1259                 iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
1260                                                         iter->old_spte, new_spte,
1261                                                         iter->level);
1262         }
1263 
1264         trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1265                                        iter->old_spte, new_spte);
1266         return true;
1267 }
1268 
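     /*
      * Entry point for the clear_young()/clear_flush_young() MMU notifiers:
      * clears the accessed state for the range and returns true if any GFN in
      * the range was accessed.
      */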
1269 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1270 {
1271         return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1272 }
1273 
1274 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1275                          struct kvm_gfn_range *range)
1276 {
1277         return is_accessed_spte(iter->old_spte);
1278 }
1279 
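     /*
      * Entry point for the test_young() MMU notifier: returns true if any GFN
      * in the range is mapped by an accessed SPTE, without clearing the
      * accessed state.
      */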
1280 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1281 {
1282         return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1283 }
1284 
1285 /*
1286  * Remove write access from all SPTEs at or above min_level that map GFNs
1287  * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1288  * be flushed.
1289  */
1290 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1291                              gfn_t start, gfn_t end, int min_level)
1292 {
1293         struct tdp_iter iter;
1294         u64 new_spte;
1295         bool spte_set = false;
1296 
1297         rcu_read_lock();
1298 
1299         BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1300 
1301         for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1302 retry:
1303                 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1304                         continue;
1305 
1306                 if (!is_shadow_present_pte(iter.old_spte) ||
1307                     !is_last_spte(iter.old_spte, iter.level) ||
1308                     !(iter.old_spte & PT_WRITABLE_MASK))
1309                         continue;
1310 
1311                 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1312 
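                      /*
                       * tdp_mmu_set_spte_atomic() fails iff the SPTE was
                       * changed by another task, in which case iter.old_spte
                       * is refreshed with the current value; retry against
                       * the new SPTE.
                       */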
1313                 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1314                         goto retry;
1315 
1316                 spte_set = true;
1317         }
1318 
1319         rcu_read_unlock();
1320         return spte_set;
1321 }
1322 
1323 /*
1324  * Remove write access from all the SPTEs mapping GFNs in the memslot. Only
1325  * leaf SPTEs at or above min_level are affected.
1326  * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1327  */
1328 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1329                              const struct kvm_memory_slot *slot, int min_level)
1330 {
1331         struct kvm_mmu_page *root;
1332         bool spte_set = false;
1333 
1334         lockdep_assert_held_read(&kvm->mmu_lock);
1335 
1336         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1337                 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1338                              slot->base_gfn + slot->npages, min_level);
1339 
1340         return spte_set;
1341 }
1342 
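     /*
      * Allocate a shadow page for huge page splitting.  Unlike the page fault
      * path, which allocates from per-vCPU caches, splitting may run in a VM
      * ioctl context with no caches available, so the page is allocated
      * directly; the caller drops mmu_lock around the allocation so that
      * direct reclaim is allowed.
      */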
1343 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
1344 {
1345         struct kvm_mmu_page *sp;
1346 
1347         sp = kmem_cache_zalloc(mmu_page_header_cache, GFP_KERNEL_ACCOUNT);
1348         if (!sp)
1349                 return NULL;
1350 
1351         sp->spt = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
1352         if (!sp->spt) {
1353                 kmem_cache_free(mmu_page_header_cache, sp);
1354                 return NULL;
1355         }
1356 
1357         return sp;
1358 }
1359 
1360 /* Note, the caller is responsible for initializing @sp. */
1361 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1362                                    struct kvm_mmu_page *sp, bool shared)
1363 {
1364         const u64 huge_spte = iter->old_spte;
1365         const int level = iter->level;
1366         int ret, i;
1367 
1368         /*
1369          * No need for atomics when writing to sp->spt since the page table has
1370          * not been linked in yet and thus is not reachable from any other CPU.
1371          */
1372         for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
1373                 sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1374 
1375         /*
1376          * Replace the huge spte with a pointer to the populated lower level
1377          * page table. Since we are making this change without a TLB flush, vCPUs
1378          * will see a mix of the split mappings and the original huge mapping,
1379          * depending on what's currently in their TLB. This is fine from a
1380          * correctness standpoint since the translation will be the same either
1381          * way.
1382          */
1383         ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1384         if (ret)
1385                 goto out;
1386 
1387         /*
1388          * tdp_mmu_link_sp() will handle subtracting the huge page we
1389          * are overwriting from the page stats. But we have to manually update
1390          * the page stats with the new present child pages.
1391          */
1392         kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1393 
1394 out:
1395         trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1396         return ret;
1397 }
1398 
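     /*
      * Split all huge pages mapping GFNs in [start, end) that sit above
      * @target_level down to @target_level.  @shared indicates whether
      * mmu_lock is held for read, in which case SPTEs must be replaced
      * atomically.  Returns 0 on success, or -ENOMEM if allocating a new
      * page table fails.
      */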
1399 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1400                                          struct kvm_mmu_page *root,
1401                                          gfn_t start, gfn_t end,
1402                                          int target_level, bool shared)
1403 {
1404         struct kvm_mmu_page *sp = NULL;
1405         struct tdp_iter iter;
1406 
1407         rcu_read_lock();
1408 
1409         /*
1410          * Traverse the page table splitting all huge pages above the target
1411          * level into one lower level. For example, if we encounter a 1GB page,
1412          * we split it into 512 2MB pages.
1413          *
1414          * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1415          * to visit an SPTE before ever visiting its children, which means we
1416          * will correctly recursively split huge pages that are more than one
1417          * level above the target level (e.g. splitting a 1GB page into 512 2MB
1418          * pages, and then splitting each of those into 512 4KB pages).
1419          */
1420         for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1421 retry:
1422                 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1423                         continue;
1424 
1425                 if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1426                         continue;
1427 
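                      /*
                       * Allocate the new page table lazily, dropping mmu_lock
                       * (and RCU) so the allocation can trigger direct
                       * reclaim.  Setting iter.yielded makes the iterator
                       * restart at the current GFN, as the paging structures
                       * may have changed while the locks were dropped.
                       */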
1428                 if (!sp) {
1429                         rcu_read_unlock();
1430 
1431                         if (shared)
1432                                 read_unlock(&kvm->mmu_lock);
1433                         else
1434                                 write_unlock(&kvm->mmu_lock);
1435 
1436                         sp = tdp_mmu_alloc_sp_for_split();
1437 
1438                         if (shared)
1439                                 read_lock(&kvm->mmu_lock);
1440                         else
1441                                 write_lock(&kvm->mmu_lock);
1442 
1443                         if (!sp) {
1444                                 trace_kvm_mmu_split_huge_page(iter.gfn,
1445                                                               iter.old_spte,
1446                                                               iter.level, -ENOMEM);
1447                                 return -ENOMEM;
1448                         }
1449 
1450                         rcu_read_lock();
1451 
1452                         iter.yielded = true;
1453                         continue;
1454                 }
1455 
1456                 tdp_mmu_init_child_sp(sp, &iter);
1457 
1458                 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1459                         goto retry;
1460 
1461                 sp = NULL;
1462         }
1463 
1464         rcu_read_unlock();
1465 
1466         /*
1467          * It's possible to exit the loop having never used the last sp if, for
1468          * example, a vCPU doing NX huge page splitting wins the race and
1469          * installs its own sp in place of the last sp we tried to split.
1470          */
1471         if (sp)
1472                 tdp_mmu_free_sp(sp);
1473 
1474         return 0;
1475 }
1476 
1477 
1478 /*
1479  * Try to split all huge pages mapped by the TDP MMU down to the target level.
1480  */
1481 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1482                                       const struct kvm_memory_slot *slot,
1483                                       gfn_t start, gfn_t end,
1484                                       int target_level, bool shared)
1485 {
1486         struct kvm_mmu_page *root;
1487         int r = 0;
1488 
1489         kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1490         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
1491                 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
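                      /*
                       * The iterator grabs a reference to each root it visits
                       * and releases it only when advancing to the next root;
                       * drop the reference manually when breaking out of the
                       * walk early.
                       */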
1492                 if (r) {
1493                         kvm_tdp_mmu_put_root(kvm, root);
1494                         break;
1495                 }
1496         }
1497 }
1498 
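     /*
      * Returns true if dirty logging for the tree must clear the writable bit
      * rather than the dirty bit, e.g. if A/D bits are disabled, or if PML is
      * in use but can't be relied on for this root (PML logs L2 GPAs for
      * shadow pages created while running an L2 guest).
      */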
1499 static bool tdp_mmu_need_write_protect(struct kvm_mmu_page *sp)
1500 {
1501         /*
1502          * All TDP MMU shadow pages share the same role as their root, aside
1503          * from level, so it is valid to key off any shadow page to determine if
1504          * write protection is needed for an entire tree.
1505          */
1506         return kvm_mmu_page_ad_need_write_protect(sp) || !kvm_ad_enabled();
1507 }
1508 
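     /*
      * Clear the dirty state of all leaf SPTEs in [start, end), clearing
      * either the D-bit or, if the tree requires write protection, the W-bit.
      * Returns true if any SPTE was modified and TLBs need to be flushed.
      */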
1509 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1510                            gfn_t start, gfn_t end)
1511 {
1512         const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
1513                                                             shadow_dirty_mask;
1514         struct tdp_iter iter;
1515         bool spte_set = false;
1516 
1517         rcu_read_lock();
1518 
1519         tdp_root_for_each_pte(iter, root, start, end) {
1520 retry:
1521                 if (!is_shadow_present_pte(iter.old_spte) ||
1522                     !is_last_spte(iter.old_spte, iter.level))
1523                         continue;
1524 
1525                 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1526                         continue;
1527 
1528                 KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
1529                                 spte_ad_need_write_protect(iter.old_spte));
1530 
1531                 if (!(iter.old_spte & dbit))
1532                         continue;
1533 
1534                 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1535                         goto retry;
1536 
1537                 spte_set = true;
1538         }
1539 
1540         rcu_read_unlock();
1541         return spte_set;
1542 }
1543 
1544 /*
1545  * Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
1546  * memslot. Returns true if an SPTE has been changed and the TLBs need to be
1547  * flushed.
1548  */
1549 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1550                                   const struct kvm_memory_slot *slot)
1551 {
1552         struct kvm_mmu_page *root;
1553         bool spte_set = false;
1554 
1555         lockdep_assert_held_read(&kvm->mmu_lock);
1556         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1557                 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1558                                 slot->base_gfn + slot->npages);
1559 
1560         return spte_set;
1561 }
1562 
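     /*
      * Clear the dirty state of the 4K SPTEs for each GFN with a set bit in
      * @mask, where bit 0 of @mask corresponds to @gfn, e.g. gfn = 0x1000 and
      * mask = 0x5 clears the state for GFNs 0x1000 and 0x1002.  Called with
      * mmu_lock held for write when the dirty log is being cleared.
      */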
1563 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1564                                   gfn_t gfn, unsigned long mask, bool wrprot)
1565 {
1566         const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
1567                                                                         shadow_dirty_mask;
1568         struct tdp_iter iter;
1569 
1570         lockdep_assert_held_write(&kvm->mmu_lock);
1571 
1572         rcu_read_lock();
1573 
1574         tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1575                                     gfn + BITS_PER_LONG) {
1576                 if (!mask)
1577                         break;
1578 
1579                 KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
1580                                 spte_ad_need_write_protect(iter.old_spte));
1581 
1582                 if (iter.level > PG_LEVEL_4K ||
1583                     !(mask & (1UL << (iter.gfn - gfn))))
1584                         continue;
1585 
1586                 mask &= ~(1UL << (iter.gfn - gfn));
1587 
1588                 if (!(iter.old_spte & dbit))
1589                         continue;
1590 
1591                 iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1592                                                         iter.old_spte, dbit,
1593                                                         iter.level);
1594 
1595                 trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1596                                                iter.old_spte,
1597                                                iter.old_spte & ~dbit);
1598                 kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1599         }
1600 
1601         rcu_read_unlock();
1602 }
1603 
1604 /*
1605  * Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
1606  * which a bit is set in mask, starting at gfn. The given memslot is expected to
1607  * contain all the GFNs represented by set bits in the mask.
1608  */
1609 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1610                                        struct kvm_memory_slot *slot,
1611                                        gfn_t gfn, unsigned long mask,
1612                                        bool wrprot)
1613 {
1614         struct kvm_mmu_page *root;
1615 
1616         for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1617                 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1618 }
1619 
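     /*
      * Zap non-leaf SPTEs in the slot that could instead map a huge page,
      * e.g. after dirty logging is disabled, so that the next fault can
      * install a huge mapping.
      */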
1620 static void zap_collapsible_spte_range(struct kvm *kvm,
1621                                        struct kvm_mmu_page *root,
1622                                        const struct kvm_memory_slot *slot)
1623 {
1624         gfn_t start = slot->base_gfn;
1625         gfn_t end = start + slot->npages;
1626         struct tdp_iter iter;
1627         int max_mapping_level;
1628 
1629         rcu_read_lock();
1630 
1631         for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1632 retry:
1633                 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1634                         continue;
1635 
1636                 if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1637                     !is_shadow_present_pte(iter.old_spte))
1638                         continue;
1639 
1640                 /*
1641                  * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
1642                  * a large page size, then its parent would have been zapped
1643                  * instead of stepping down.
1644                  */
1645                 if (is_last_spte(iter.old_spte, iter.level))
1646                         continue;
1647 
1648                 /*
1649                  * If iter.gfn resides outside of the slot, i.e. the page for
1650                  * the current level overlaps but is not contained by the slot,
1651                  * then the SPTE can't be made huge.  More importantly, trying
1652                  * to query that info from slot->arch.lpage_info will cause an
1653                  * out-of-bounds access.
1654                  */
1655                 if (iter.gfn < start || iter.gfn >= end)
1656                         continue;
1657 
1658                 max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1659                                                               iter.gfn, PG_LEVEL_NUM);
1660                 if (max_mapping_level < iter.level)
1661                         continue;
1662 
1663                 /* Note, a successful atomic zap also does a remote TLB flush. */
1664                 if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1665                         goto retry;
1666         }
1667 
1668         rcu_read_unlock();
1669 }
1670 
1671 /*
1672  * Zap non-leaf SPTEs (and free their associated page tables) which could
1673  * be replaced by huge pages, for GFNs within the slot.
1674  */
1675 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1676                                        const struct kvm_memory_slot *slot)
1677 {
1678         struct kvm_mmu_page *root;
1679 
1680         lockdep_assert_held_read(&kvm->mmu_lock);
1681         for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1682                 zap_collapsible_spte_range(kvm, root, slot);
1683 }
1684 
1685 /*
1686  * Removes write access on the last level SPTE mapping this GFN and unsets the
1687  * MMU-writable bit to ensure future writes continue to be intercepted.
1688  * Returns true if an SPTE was set and a TLB flush is needed.
1689  */
1690 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1691                               gfn_t gfn, int min_level)
1692 {
1693         struct tdp_iter iter;
1694         u64 new_spte;
1695         bool spte_set = false;
1696 
1697         BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1698 
1699         rcu_read_lock();
1700 
1701         for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1702                 if (!is_shadow_present_pte(iter.old_spte) ||
1703                     !is_last_spte(iter.old_spte, iter.level))
1704                         continue;
1705 
1706                 new_spte = iter.old_spte &
1707                         ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1708 
1709                 if (new_spte == iter.old_spte)
1710                         break;
1711 
1712                 tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1713                 spte_set = true;
1714         }
1715 
1716         rcu_read_unlock();
1717 
1718         return spte_set;
1719 }
1720 
1721 /*
1722  * Removes write access on the last level SPTE mapping this GFN and unsets the
1723  * MMU-writable bit to ensure future writes continue to be intercepted.
1724  * Returns true if an SPTE was set and a TLB flush is needed.
1725  */
1726 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1727                                    struct kvm_memory_slot *slot, gfn_t gfn,
1728                                    int min_level)
1729 {
1730         struct kvm_mmu_page *root;
1731         bool spte_set = false;
1732 
1733         lockdep_assert_held_write(&kvm->mmu_lock);
1734         for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1735                 spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1736 
1737         return spte_set;
1738 }
1739 
1740 /*
1741  * Return the level of the lowest level SPTE added to sptes.
1742  * That SPTE may be non-present.
1743  *
1744  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1745  */
1746 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1747                          int *root_level)
1748 {
1749         struct tdp_iter iter;
1750         struct kvm_mmu *mmu = vcpu->arch.mmu;
1751         gfn_t gfn = addr >> PAGE_SHIFT;
1752         int leaf = -1;
1753 
1754         *root_level = vcpu->arch.mmu->root_role.level;
1755 
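              /*
               * @sptes is indexed by level; record every SPTE visited so the
               * caller can inspect the entire walk, with @leaf ending up as
               * the lowest level reached.
               */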
1756         tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1757                 leaf = iter.level;
1758                 sptes[leaf] = iter.old_spte;
1759         }
1760 
1761         return leaf;
1762 }
1763 
1764 /*
1765  * Returns the last level spte pointer of the shadow page walk for the given
1766  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1767  * walk could be performed, returns NULL and *spte does not contain valid data.
1768  *
1769  * Contract:
1770  *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1771  *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1772  *
1773  * WARNING: This function is only intended to be called during fast_page_fault.
1774  */
1775 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
1776                                         u64 *spte)
1777 {
1778         struct tdp_iter iter;
1779         struct kvm_mmu *mmu = vcpu->arch.mmu;
1780         tdp_ptep_t sptep = NULL;
1781 
1782         tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1783                 *spte = iter.old_spte;
1784                 sptep = iter.sptep;
1785         }
1786 
1787         /*
1788          * Perform the rcu_dereference to get the raw spte pointer value since
1789          * we are passing it up to fast_page_fault, which is shared with the
1790          * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1791          * annotation.
1792          *
1793          * This is safe since fast_page_fault obeys the contracts of this
1794          * function as well as all TDP MMU contracts around modifying SPTEs
1795          * outside of mmu_lock.
1796          */
1797         return rcu_dereference(sptep);
1798 }
1799 
