- // SPDX-License-Identifier: GPL-2.0
- #include "mmu.h"
- #include "mmu_internal.h"
- #include "mmutrace.h"
- #include "tdp_iter.h"
- #include "tdp_mmu.h"
- #include "spte.h"
- #include <asm/cmpxchg.h>
- #include <trace/events/kvm.h>
- static bool __read_mostly tdp_mmu_enabled = true;
- module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
- /* Initializes the TDP MMU for the VM, if enabled. */
- void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
- {
- if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
- return;
- /* This should not be changed for the lifetime of the VM. */
- kvm->arch.tdp_mmu_enabled = true;
- INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
- spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
- INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
- }
- /* Arbitrarily returns true so that this may be used in if statements. */
- static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
- bool shared)
- {
- if (shared)
- lockdep_assert_held_read(&kvm->mmu_lock);
- else
- lockdep_assert_held_write(&kvm->mmu_lock);
- return true;
- }
- void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
- {
- if (!kvm->arch.tdp_mmu_enabled)
- return;
- /*
- * Invalidate all roots, which besides the obvious, schedules all roots
- * for zapping and thus puts the TDP MMU's reference to each root, i.e.
- * ultimately frees all roots.
- */
- kvm_tdp_mmu_invalidate_all_roots(kvm);
- kvm_tdp_mmu_zap_invalidated_roots(kvm);
- WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
- WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
- /*
- * Ensure that all the outstanding RCU callbacks to free shadow pages
- * can run before the VM is torn down. Putting the last reference to
- * zapped roots will create new callbacks.
- */
- rcu_barrier();
- }
- static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
- {
- free_page((unsigned long)sp->spt);
- kmem_cache_free(mmu_page_header_cache, sp);
- }
- /*
- * This is called through call_rcu in order to free TDP page table memory
- * safely with respect to other kernel threads that may be operating on
- * the memory.
- * By only accessing TDP MMU page table memory in an RCU read critical
- * section, and freeing it after a grace period, lockless access to that
- * memory won't use it after it is freed.
- */
- static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
- {
- struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
- rcu_head);
- tdp_mmu_free_sp(sp);
- }
- void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared)
- {
- kvm_lockdep_assert_mmu_lock_held(kvm, shared);
- if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
- return;
- /*
- * The TDP MMU itself holds a reference to each root until the root is
- * explicitly invalidated, i.e. the final reference should never be
- * put for a valid root.
- */
- KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
- spin_lock(&kvm->arch.tdp_mmu_pages_lock);
- list_del_rcu(&root->link);
- spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
- call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
- }
- /*
- * Returns the next root after @prev_root (or the first root if @prev_root is
- * NULL). A reference to the returned root is acquired, and the reference to
- * @prev_root is released (the caller obviously must hold a reference to
- * @prev_root if it's non-NULL).
- *
- * If @only_valid is true, invalid roots are skipped.
- *
- * Returns NULL if the end of tdp_mmu_roots was reached.
- */
- static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
- struct kvm_mmu_page *prev_root,
- bool shared, bool only_valid)
- {
- struct kvm_mmu_page *next_root;
- rcu_read_lock();
- if (prev_root)
- next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
- &prev_root->link,
- typeof(*prev_root), link);
- else
- next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
- typeof(*next_root), link);
- while (next_root) {
- if ((!only_valid || !next_root->role.invalid) &&
- kvm_tdp_mmu_get_root(next_root))
- break;
- next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
- &next_root->link, typeof(*next_root), link);
- }
- rcu_read_unlock();
- if (prev_root)
- kvm_tdp_mmu_put_root(kvm, prev_root, shared);
- return next_root;
- }
- /*
- * Note: this iterator gets and puts references to the roots it iterates over.
- * This makes it safe to release the MMU lock and yield within the loop, but
- * if exiting the loop early, the caller must drop the reference to the most
- * recent root. (Unless keeping a live reference is desirable.)
- *
- * If shared is set, this function is operating under the MMU lock in read
- * mode. In the unlikely event that this thread must free a root, the lock
- * will be temporarily dropped and reacquired in write mode.
- */
- #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
- for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \
- _root; \
- _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \
- if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \
- kvm_mmu_page_as_id(_root) != _as_id) { \
- } else
- #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
- __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
- #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
- for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \
- _root; \
- _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \
- if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
- } else
- /*
- * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
- * the implication being that any flow that holds mmu_lock for read is
- * inherently yield-friendly and should use the yield-safe variant above.
- * Holding mmu_lock for write obviates the need for RCU protection as the list
- * is guaranteed to be stable.
- */
- #define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
- list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \
- if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \
- kvm_mmu_page_as_id(_root) != _as_id) { \
- } else
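- /*
- * Illustrative sketch, not part of the upstream file: a typical memslot-scoped
- * walk over all valid roots using the yield-safe iterator, mirroring how
- * kvm_tdp_mmu_wrprot_slot() below consumes it. some_gfn_range_op() is a
- * hypothetical per-root callback.
- *
- * bool spte_set = false;
- *
- * lockdep_assert_held_read(&kvm->mmu_lock);
- * for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
- * spte_set |= some_gfn_range_op(kvm, root, slot->base_gfn,
- * slot->base_gfn + slot->npages);
- */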
- static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
- {
- struct kvm_mmu_page *sp;
- sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
- sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
- return sp;
- }
- static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
- gfn_t gfn, union kvm_mmu_page_role role)
- {
- set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
- sp->role = role;
- sp->gfn = gfn;
- sp->ptep = sptep;
- sp->tdp_mmu_page = true;
- trace_kvm_mmu_get_page(sp, true);
- }
- static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
- struct tdp_iter *iter)
- {
- struct kvm_mmu_page *parent_sp;
- union kvm_mmu_page_role role;
- parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
- role = parent_sp->role;
- role.level--;
- tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
- }
- hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
- {
- union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
- struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_page *root;
- lockdep_assert_held_write(&kvm->mmu_lock);
- /*
- * Check for an existing root before allocating a new one. Note, the
- * role check prevents consuming an invalid root.
- */
- for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
- if (root->role.word == role.word &&
- kvm_tdp_mmu_get_root(root))
- goto out;
- }
- root = tdp_mmu_alloc_sp(vcpu);
- tdp_mmu_init_sp(root, NULL, 0, role);
- /*
- * TDP MMU roots are kept until they are explicitly invalidated, either
- * by a memslot update or by the destruction of the VM. Initialize the
- * refcount to two; one reference for the vCPU, and one reference for
- * the TDP MMU itself, which is held until the root is invalidated and
- * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
- */
- refcount_set(&root->tdp_mmu_root_count, 2);
- spin_lock(&kvm->arch.tdp_mmu_pages_lock);
- list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
- spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
- out:
- return __pa(root->spt);
- }
- static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
- u64 old_spte, u64 new_spte, int level,
- bool shared);
- static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
- {
- if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
- return;
- if (is_accessed_spte(old_spte) &&
- (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
- spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
- kvm_set_pfn_accessed(spte_to_pfn(old_spte));
- }
- static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
- u64 old_spte, u64 new_spte, int level)
- {
- bool pfn_changed;
- struct kvm_memory_slot *slot;
- if (level > PG_LEVEL_4K)
- return;
- pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
- if ((!is_writable_pte(old_spte) || pfn_changed) &&
- is_writable_pte(new_spte)) {
- slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
- mark_page_dirty_in_slot(kvm, slot, gfn);
- }
- }
- static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
- {
- kvm_account_pgtable_pages((void *)sp->spt, +1);
- }
- static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
- {
- kvm_account_pgtable_pages((void *)sp->spt, -1);
- }
- /**
- * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
- *
- * @kvm: kvm instance
- * @sp: the page to be removed
- * @shared: This operation may not be running under the exclusive use of
- * the MMU lock and the operation must synchronize with other
- * threads that might be adding or removing pages.
- */
- static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
- bool shared)
- {
- tdp_unaccount_mmu_page(kvm, sp);
- if (shared)
- spin_lock(&kvm->arch.tdp_mmu_pages_lock);
- else
- lockdep_assert_held_write(&kvm->mmu_lock);
- list_del(&sp->link);
- if (sp->lpage_disallowed)
- unaccount_huge_nx_page(kvm, sp);
- if (shared)
- spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
- }
- /**
- * handle_removed_pt() - handle a page table removed from the TDP structure
- *
- * @kvm: kvm instance
- * @pt: the page removed from the paging structure
- * @shared: This operation may not be running under the exclusive use
- * of the MMU lock and the operation must synchronize with other
- * threads that might be modifying SPTEs.
- *
- * Given a page table that has been removed from the TDP paging structure,
- * iterates through the page table to clear SPTEs and free child page tables.
- *
- * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
- * protection. Since this thread removed it from the paging structure,
- * this thread will be responsible for ensuring the page is freed. Hence the
- * early rcu_dereferences in the function.
- */
- static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
- {
- struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
- int level = sp->role.level;
- gfn_t base_gfn = sp->gfn;
- int i;
- trace_kvm_mmu_prepare_zap_page(sp);
- tdp_mmu_unlink_sp(kvm, sp, shared);
- for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
- tdp_ptep_t sptep = pt + i;
- gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
- u64 old_spte;
- if (shared) {
- /*
- * Set the SPTE to a nonpresent value that other
- * threads will not overwrite. If the SPTE was
- * already marked as removed then another thread
- * handling a page fault could overwrite it, so
- * keep retrying the write until this thread transitions
- * the SPTE from some other value to the removed SPTE value.
- */
- for (;;) {
- old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
- if (!is_removed_spte(old_spte))
- break;
- cpu_relax();
- }
- } else {
- /*
- * If the SPTE is not MMU-present, there is no backing
- * page associated with the SPTE and so no side effects
- * that need to be recorded, and exclusive ownership of
- * mmu_lock ensures the SPTE can't be made present.
- * Note, zapping MMIO SPTEs is also unnecessary as they
- * are guarded by the memslots generation, not by being
- * unreachable.
- */
- old_spte = kvm_tdp_mmu_read_spte(sptep);
- if (!is_shadow_present_pte(old_spte))
- continue;
- /*
- * Use the common helper instead of a raw WRITE_ONCE as
- * the SPTE needs to be updated atomically if it can be
- * modified by a different vCPU outside of mmu_lock.
- * Even though the parent SPTE is !PRESENT, the TLB
- * hasn't yet been flushed, and both Intel and AMD
- * document that A/D assists can use upper-level PxE
- * entries that are cached in the TLB, i.e. the CPU can
- * still access the page and mark it dirty.
- *
- * No retry is needed in the atomic update path as the
- * sole concern is dropping a Dirty bit, i.e. no other
- * task can zap/remove the SPTE as mmu_lock is held for
- * write. Marking the SPTE as a removed SPTE is not
- * strictly necessary for the same reason, but using
- * the removed SPTE value keeps the shared/exclusive
- * paths consistent and allows the handle_changed_spte()
- * call below to hardcode the new value to REMOVED_SPTE.
- *
- * Note, even though dropping a Dirty bit is the only
- * scenario where a non-atomic update could result in a
- * functional bug, simply checking the Dirty bit isn't
- * sufficient as a fast page fault could read the upper
- * level SPTE before it is zapped, and then make this
- * target SPTE writable, resume the guest, and set the
- * Dirty bit between reading the SPTE above and writing
- * it here.
- */
- old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
- REMOVED_SPTE, level);
- }
- handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
- old_spte, REMOVED_SPTE, level, shared);
- }
- call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
- }
- /**
- * __handle_changed_spte - handle bookkeeping associated with an SPTE change
- * @kvm: kvm instance
- * @as_id: the address space of the paging structure the SPTE was a part of
- * @gfn: the base GFN that was mapped by the SPTE
- * @old_spte: The value of the SPTE before the change
- * @new_spte: The value of the SPTE after the change
- * @level: the level of the PT the SPTE is part of in the paging structure
- * @shared: This operation may not be running under the exclusive use of
- * the MMU lock and the operation must synchronize with other
- * threads that might be modifying SPTEs.
- *
- * Handle bookkeeping that might result from the modification of a SPTE.
- * This function must be called for all TDP SPTE modifications.
- */
- static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
- u64 old_spte, u64 new_spte, int level,
- bool shared)
- {
- bool was_present = is_shadow_present_pte(old_spte);
- bool is_present = is_shadow_present_pte(new_spte);
- bool was_leaf = was_present && is_last_spte(old_spte, level);
- bool is_leaf = is_present && is_last_spte(new_spte, level);
- bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
- WARN_ON(level > PT64_ROOT_MAX_LEVEL);
- WARN_ON(level < PG_LEVEL_4K);
- WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
- /*
- * If this warning were to trigger it would indicate that there was a
- * missing MMU notifier or a race with some notifier handler.
- * A present, leaf SPTE should never be directly replaced with another
- * present leaf SPTE pointing to a different PFN. A notifier handler
- * should be zapping the SPTE before the main MM's page table is
- * changed, or the SPTE should be zeroed, and the TLBs flushed by the
- * thread before replacement.
- */
- if (was_leaf && is_leaf && pfn_changed) {
- pr_err("Invalid SPTE change: cannot replace a present leaf\n"
- "SPTE with another present leaf SPTE mapping a\n"
- "different PFN!\n"
- "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
- as_id, gfn, old_spte, new_spte, level);
- /*
- * Crash the host to prevent error propagation and guest data
- * corruption.
- */
- BUG();
- }
- if (old_spte == new_spte)
- return;
- trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
- if (is_leaf)
- check_spte_writable_invariants(new_spte);
- /*
- * The only times a SPTE should be changed from a non-present to
- * non-present state is when an MMIO entry is installed/modified/
- * removed. In that case, there is nothing to do here.
- */
- if (!was_present && !is_present) {
- /*
- * If this change does not involve a MMIO SPTE or removed SPTE,
- * it is unexpected. Log the change, though it should not
- * impact the guest since both the former and current SPTEs
- * are nonpresent.
- */
- if (WARN_ON(!is_mmio_spte(old_spte) &&
- !is_mmio_spte(new_spte) &&
- !is_removed_spte(new_spte)))
- pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
- "should not be replaced with another,\n"
- "different nonpresent SPTE, unless one or both\n"
- "are MMIO SPTEs, or the new SPTE is\n"
- "a temporary removed SPTE.\n"
- "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
- as_id, gfn, old_spte, new_spte, level);
- return;
- }
- if (is_leaf != was_leaf)
- kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
- if (was_leaf && is_dirty_spte(old_spte) &&
- (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
- kvm_set_pfn_dirty(spte_to_pfn(old_spte));
- /*
- * Recursively handle child PTs if the change removed a subtree from
- * the paging structure. Note the WARN on the PFN changing without the
- * SPTE being converted to a hugepage (leaf) or being zapped. Shadow
- * pages are kernel allocations and should never be migrated.
- */
- if (was_present && !was_leaf &&
- (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
- handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
- }
- static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
- u64 old_spte, u64 new_spte, int level,
- bool shared)
- {
- __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
- shared);
- handle_changed_spte_acc_track(old_spte, new_spte, level);
- handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
- new_spte, level);
- }
- /*
- * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
- * and handle the associated bookkeeping. Do not mark the page dirty
- * in KVM's dirty bitmaps.
- *
- * If setting the SPTE fails because it has changed, iter->old_spte will be
- * refreshed to the current value of the spte.
- *
- * @kvm: kvm instance
- * @iter: a tdp_iter instance currently on the SPTE that should be set
- * @new_spte: The value the SPTE should be set to
- * Return:
- * * 0 - If the SPTE was set.
- * * -EBUSY - If the SPTE cannot be set. In this case this function will have
- * no side-effects other than setting iter->old_spte to the last
- * known value of the spte.
- */
- static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
- struct tdp_iter *iter,
- u64 new_spte)
- {
- u64 *sptep = rcu_dereference(iter->sptep);
- /*
- * The caller is responsible for ensuring the old SPTE is not a REMOVED
- * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
- * and pre-checking before inserting a new SPTE is advantageous as it
- * avoids unnecessary work.
- */
- WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
- lockdep_assert_held_read(&kvm->mmu_lock);
- /*
- * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
- * does not hold the mmu_lock.
- */
- if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
- return -EBUSY;
- __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
- new_spte, iter->level, true);
- handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
- return 0;
- }
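- /*
- * Illustrative sketch, not part of the upstream file: callers holding
- * mmu_lock for read typically retry the current iterator entry when the
- * cmpxchg loses a race, as wrprot_gfn_range() does later in this file:
- *
- * retry:
- * if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
- * continue;
- * ...
- * if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
- * goto retry;
- * spte_set = true;
- */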
- static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
- struct tdp_iter *iter)
- {
- int ret;
- /*
- * Freeze the SPTE by setting it to a special,
- * non-present value. This will stop other threads from
- * immediately installing a present entry in its place
- * before the TLBs are flushed.
- */
- ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
- if (ret)
- return ret;
- kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
- KVM_PAGES_PER_HPAGE(iter->level));
- /*
- * No other thread can overwrite the removed SPTE as they must either
- * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
- * overwrite the special removed SPTE value. No bookkeeping is needed
- * here since the SPTE is going from non-present to non-present. Use
- * the raw write helper to avoid an unnecessary check on volatile bits.
- */
- __kvm_tdp_mmu_write_spte(iter->sptep, 0);
- return 0;
- }
- /*
- * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
- * @kvm: KVM instance
- * @as_id: Address space ID, i.e. regular vs. SMM
- * @sptep: Pointer to the SPTE
- * @old_spte: The current value of the SPTE
- * @new_spte: The new value that will be set for the SPTE
- * @gfn: The base GFN that was (or will be) mapped by the SPTE
- * @level: The level _containing_ the SPTE (its parent PT's level)
- * @record_acc_track: Notify the MM subsystem of changes to the accessed state
- * of the page. Should be set unless handling an MMU
- * notifier for access tracking. Leaving record_acc_track
- * unset in that case prevents page accesses from being
- * double counted.
- * @record_dirty_log: Record the page as dirty in the dirty bitmap if
- * appropriate for the change being made. Should be set
- * unless performing certain dirty logging operations.
- * Leaving record_dirty_log unset in that case prevents page
- * writes from being double counted.
- *
- * Returns the old SPTE value, which _may_ be different than @old_spte if the
- * SPTE had volatile bits.
- */
- static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
- u64 old_spte, u64 new_spte, gfn_t gfn, int level,
- bool record_acc_track, bool record_dirty_log)
- {
- lockdep_assert_held_write(&kvm->mmu_lock);
- /*
- * No thread should be using this function to set SPTEs to or from the
- * temporary removed SPTE value.
- * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
- * should be used. If operating under the MMU lock in write mode, the
- * use of the removed SPTE should not be necessary.
- */
- WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
- old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
- __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
- if (record_acc_track)
- handle_changed_spte_acc_track(old_spte, new_spte, level);
- if (record_dirty_log)
- handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
- new_spte, level);
- return old_spte;
- }
- static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
- u64 new_spte, bool record_acc_track,
- bool record_dirty_log)
- {
- WARN_ON_ONCE(iter->yielded);
- iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
- iter->old_spte, new_spte,
- iter->gfn, iter->level,
- record_acc_track, record_dirty_log);
- }
- static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
- u64 new_spte)
- {
- _tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
- }
- static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
- struct tdp_iter *iter,
- u64 new_spte)
- {
- _tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
- }
- static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
- struct tdp_iter *iter,
- u64 new_spte)
- {
- _tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
- }
- #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
- for_each_tdp_pte(_iter, _root, _start, _end)
- #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
- tdp_root_for_each_pte(_iter, _root, _start, _end) \
- if (!is_shadow_present_pte(_iter.old_spte) || \
- !is_last_spte(_iter.old_spte, _iter.level)) \
- continue; \
- else
- #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \
- for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
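- /*
- * Illustrative sketch, not part of the upstream file: tdp_root_for_each_leaf_pte()
- * narrows a walk to present leaf SPTEs, e.g. as kvm_tdp_mmu_handle_gfn() below
- * uses it under RCU protection:
- *
- * rcu_read_lock();
- * tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
- * ret |= handler(kvm, &iter, range);
- * rcu_read_unlock();
- */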
- /*
- * Yield if the MMU lock is contended or this thread needs to return control
- * to the scheduler.
- *
- * If this function should yield and flush is set, it will perform a remote
- * TLB flush before yielding.
- *
- * If this function yields, iter->yielded is set and the caller must skip to
- * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
- * over the paging structures to allow the iterator to continue its traversal
- * from the paging structure root.
- *
- * Returns true if this function yielded.
- */
- static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
- struct tdp_iter *iter,
- bool flush, bool shared)
- {
- WARN_ON(iter->yielded);
- /* Ensure forward progress has been made before yielding. */
- if (iter->next_last_level_gfn == iter->yielded_gfn)
- return false;
- if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
- if (flush)
- kvm_flush_remote_tlbs(kvm);
- rcu_read_unlock();
- if (shared)
- cond_resched_rwlock_read(&kvm->mmu_lock);
- else
- cond_resched_rwlock_write(&kvm->mmu_lock);
- rcu_read_lock();
- WARN_ON(iter->gfn > iter->next_last_level_gfn);
- iter->yielded = true;
- }
- return iter->yielded;
- }
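- /*
- * Illustrative sketch, not part of the upstream file: the canonical walk
- * skeleton built around this helper, as used by the zap and write-protect
- * paths below. When the helper yields, iter.yielded is set and tdp_iter_next()
- * restarts the walk from the paging structure root on the next iteration.
- *
- * for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
- * if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared))
- * continue;
- * ...process iter.old_spte...
- * }
- */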
- static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
- {
- /*
- * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with
- * a gpa range that would exceed the max gfn, and KVM does not create
- * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
- * the slow emulation path every time.
- */
- return kvm_mmu_max_gfn() + 1;
- }
- static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared, int zap_level)
- {
- struct tdp_iter iter;
- gfn_t end = tdp_mmu_max_gfn_exclusive();
- gfn_t start = 0;
- for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
- retry:
- if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
- continue;
- if (!is_shadow_present_pte(iter.old_spte))
- continue;
- if (iter.level > zap_level)
- continue;
- if (!shared)
- tdp_mmu_set_spte(kvm, &iter, 0);
- else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
- goto retry;
- }
- }
- static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared)
- {
- /*
- * The root must have an elevated refcount so that it's reachable via
- * mmu_notifier callbacks, which allows this path to yield and drop
- * mmu_lock. When handling an unmap/release mmu_notifier command, KVM
- * must drop all references to relevant pages prior to completing the
- * callback. Dropping mmu_lock with an unreachable root would result
- * in zapping SPTEs after a relevant mmu_notifier callback completes
- * and lead to use-after-free as zapping a SPTE triggers "writeback" of
- * dirty accessed bits to the SPTE's associated struct page.
- */
- WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
- kvm_lockdep_assert_mmu_lock_held(kvm, shared);
- rcu_read_lock();
- /*
- * To avoid RCU stalls due to recursively removing huge swaths of SPs,
- * split the zap into two passes. On the first pass, zap at the 1gb
- * level, and then zap top-level SPs on the second pass. "1gb" is not
- * arbitrary, as KVM must be able to zap a 1gb shadow page without
- * inducing a stall to allow in-place replacement with a 1gb hugepage.
- *
- * Because zapping a SP recurses on its children, stepping down to
- * PG_LEVEL_4K in the iterator itself is unnecessary.
- */
- __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
- __tdp_mmu_zap_root(kvm, root, shared, root->role.level);
- rcu_read_unlock();
- }
- bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
- {
- u64 old_spte;
- /*
- * This helper intentionally doesn't allow zapping a root shadow page,
- * which doesn't have a parent page table and thus no associated entry.
- */
- if (WARN_ON_ONCE(!sp->ptep))
- return false;
- old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
- if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
- return false;
- __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
- sp->gfn, sp->role.level + 1, true, true);
- return true;
- }
- /*
- * If can_yield is true, will release the MMU lock and reschedule if the
- * scheduler needs the CPU or there is contention on the MMU lock. If this
- * function cannot yield, it will not release the MMU lock or reschedule and
- * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
- */
- static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, bool can_yield, bool flush)
- {
- struct tdp_iter iter;
- end = min(end, tdp_mmu_max_gfn_exclusive());
- lockdep_assert_held_write(&kvm->mmu_lock);
- rcu_read_lock();
- for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
- if (can_yield &&
- tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
- flush = false;
- continue;
- }
- if (!is_shadow_present_pte(iter.old_spte) ||
- !is_last_spte(iter.old_spte, iter.level))
- continue;
- tdp_mmu_set_spte(kvm, &iter, 0);
- flush = true;
- }
- rcu_read_unlock();
- /*
- * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
- * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
- */
- return flush;
- }
- /*
- * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
- * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
- * more SPTEs were zapped since the MMU lock was last acquired.
- */
- bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
- {
- struct kvm_mmu_page *root;
- for_each_tdp_mmu_root_yield_safe(kvm, root, false)
- flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
- return flush;
- }
- void kvm_tdp_mmu_zap_all(struct kvm *kvm)
- {
- struct kvm_mmu_page *root;
- /*
- * Zap all roots, including invalid roots, as all SPTEs must be dropped
- * before returning to the caller. Zap directly even if the root is
- * also being zapped by a worker. Walking zapped top-level SPTEs isn't
- * all that expensive and mmu_lock is already held, which means the
- * worker has yielded, i.e. flushing the work instead of zapping here
- * isn't guaranteed to be any faster.
- *
- * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
- * is being destroyed or the userspace VMM has exited. In both cases,
- * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
- */
- for_each_tdp_mmu_root_yield_safe(kvm, root, false)
- tdp_mmu_zap_root(kvm, root, false);
- }
- /*
- * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
- * zap" completes.
- */
- void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
- {
- struct kvm_mmu_page *root;
- read_lock(&kvm->mmu_lock);
- for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
- if (!root->tdp_mmu_scheduled_root_to_zap)
- continue;
- root->tdp_mmu_scheduled_root_to_zap = false;
- KVM_BUG_ON(!root->role.invalid, kvm);
- /*
- * A TLB flush is not necessary as KVM performs a local TLB
- * flush when allocating a new root (see kvm_mmu_load()), and
- * when migrating a vCPU to a different pCPU. Note, the local
- * TLB flush on reuse also invalidates paging-structure-cache
- * entries, i.e. TLB entries for intermediate paging structures,
- * that may be zapped, as such entries are associated with the
- * ASID on both VMX and SVM.
- */
- tdp_mmu_zap_root(kvm, root, true);
- /*
- * The reference needs to be put *after* zapping the root, as
- * the root must be reachable by mmu_notifiers while it's being
- * zapped.
- */
- kvm_tdp_mmu_put_root(kvm, root, true);
- }
- read_unlock(&kvm->mmu_lock);
- }
- /*
- * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
- * is about to be zapped, e.g. in response to a memslots update. The actual
- * zapping is done separately so that it happens with mmu_lock held for read,
- * whereas invalidating roots must be done with mmu_lock held for write (unless
- * the VM is being destroyed).
- *
- * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
- * See kvm_tdp_mmu_get_vcpu_root_hpa().
- */
- void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
- {
- struct kvm_mmu_page *root;
- /*
- * mmu_lock must be held for write to ensure that a root doesn't become
- * invalid while there are active readers (invalidating a root while
- * there are active readers may or may not be problematic in practice,
- * but it's uncharted territory and not supported).
- *
- * Waive the assertion if there are no users of @kvm, i.e. the VM is
- * being destroyed after all references have been put, or if no vCPUs
- * have been created (which means there are no roots), i.e. the VM is
- * being destroyed in an error path of KVM_CREATE_VM.
- */
- if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
- refcount_read(&kvm->users_count) && kvm->created_vcpus)
- lockdep_assert_held_write(&kvm->mmu_lock);
- /*
- * As above, mmu_lock isn't held when destroying the VM! There can't
- * be other references to @kvm, i.e. nothing else can invalidate roots
- * or get/put references to roots.
- */
- list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
- /*
- * Note, invalid roots can outlive a memslot update! Invalid
- * roots must be *zapped* before the memslot update completes,
- * but a different task can acquire a reference and keep the
- * root alive after it's been zapped.
- */
- if (!root->role.invalid) {
- root->tdp_mmu_scheduled_root_to_zap = true;
- root->role.invalid = true;
- }
- }
- }
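- /*
- * Illustrative sketch, not part of the upstream file: the invalidate + reap
- * pairing used by kvm_mmu_uninit_tdp_mmu() above, where roots are first
- * scheduled for zapping and then zapped and put:
- *
- * kvm_tdp_mmu_invalidate_all_roots(kvm);
- * kvm_tdp_mmu_zap_invalidated_roots(kvm);
- */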
- /*
- * Installs a last-level SPTE to handle a TDP page fault.
- * (NPT/EPT violation/misconfiguration)
- */
- static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
- struct kvm_page_fault *fault,
- struct tdp_iter *iter)
- {
- struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
- u64 new_spte;
- int ret = RET_PF_FIXED;
- bool wrprot = false;
- WARN_ON(sp->role.level != fault->goal_level);
- if (unlikely(!fault->slot))
- new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
- else
- wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
- fault->pfn, iter->old_spte, fault->prefetch, true,
- fault->map_writable, &new_spte);
- if (new_spte == iter->old_spte)
- ret = RET_PF_SPURIOUS;
- else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
- return RET_PF_RETRY;
- else if (is_shadow_present_pte(iter->old_spte) &&
- !is_last_spte(iter->old_spte, iter->level))
- kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
- KVM_PAGES_PER_HPAGE(iter->level + 1));
- /*
- * If the page fault was caused by a write but the page is write
- * protected, emulation is needed. If the emulation was skipped,
- * the vCPU would have the same fault again.
- */
- if (wrprot) {
- if (fault->write)
- ret = RET_PF_EMULATE;
- }
- /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
- if (unlikely(is_mmio_spte(new_spte))) {
- vcpu->stat.pf_mmio_spte_created++;
- trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
- new_spte);
- ret = RET_PF_EMULATE;
- } else {
- trace_kvm_mmu_set_spte(iter->level, iter->gfn,
- rcu_dereference(iter->sptep));
- }
- return ret;
- }
- /*
- * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
- * provided page table.
- *
- * @kvm: kvm instance
- * @iter: a tdp_iter instance currently on the SPTE that should be set
- * @sp: The new TDP page table to install.
- * @account_nx: True if this page table is being installed to split a
- * non-executable huge page.
- * @shared: This operation is running under the MMU lock in read mode.
- *
- * Returns: 0 if the new page table was installed. Non-0 if the page table
- * could not be installed (e.g. the atomic compare-exchange failed).
- */
- static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
- struct kvm_mmu_page *sp, bool account_nx,
- bool shared)
- {
- u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
- int ret = 0;
- if (shared) {
- ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
- if (ret)
- return ret;
- } else {
- tdp_mmu_set_spte(kvm, iter, spte);
- }
- spin_lock(&kvm->arch.tdp_mmu_pages_lock);
- list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
- if (account_nx)
- account_huge_nx_page(kvm, sp);
- spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
- tdp_account_mmu_page(kvm, sp);
- return 0;
- }
- /*
- * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
- * page tables and SPTEs to translate the faulting guest physical address.
- */
- int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
- {
- struct kvm_mmu *mmu = vcpu->arch.mmu;
- struct tdp_iter iter;
- struct kvm_mmu_page *sp;
- int ret;
- kvm_mmu_hugepage_adjust(vcpu, fault);
- trace_kvm_mmu_spte_requested(fault);
- rcu_read_lock();
- tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
- if (fault->nx_huge_page_workaround_enabled)
- disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
- if (iter.level == fault->goal_level)
- break;
- /*
- * If there is an SPTE mapping a large page at a higher level
- * than the target, that SPTE must be cleared and replaced
- * with a non-leaf SPTE.
- */
- if (is_shadow_present_pte(iter.old_spte) &&
- is_large_pte(iter.old_spte)) {
- if (tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
- break;
- /*
- * The iter must explicitly re-read the spte here
- * because the new value informs the !present
- * path below.
- */
- iter.old_spte = kvm_tdp_mmu_read_spte(iter.sptep);
- }
- if (!is_shadow_present_pte(iter.old_spte)) {
- bool account_nx = fault->huge_page_disallowed &&
- fault->req_level >= iter.level;
- /*
- * If SPTE has been frozen by another thread, just
- * give up and retry, avoiding unnecessary page table
- * allocation and free.
- */
- if (is_removed_spte(iter.old_spte))
- break;
- sp = tdp_mmu_alloc_sp(vcpu);
- tdp_mmu_init_child_sp(sp, &iter);
- if (tdp_mmu_link_sp(vcpu->kvm, &iter, sp, account_nx, true)) {
- tdp_mmu_free_sp(sp);
- break;
- }
- }
- }
- /*
- * Force the guest to retry the access if the upper level SPTEs aren't
- * in place, or if the target leaf SPTE is frozen by another CPU.
- */
- if (iter.level != fault->goal_level || is_removed_spte(iter.old_spte)) {
- rcu_read_unlock();
- return RET_PF_RETRY;
- }
- ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
- rcu_read_unlock();
- return ret;
- }
- bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
- bool flush)
- {
- struct kvm_mmu_page *root;
- __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
- flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
- range->may_block, flush);
- return flush;
- }
- typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
- struct kvm_gfn_range *range);
- static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
- struct kvm_gfn_range *range,
- tdp_handler_t handler)
- {
- struct kvm_mmu_page *root;
- struct tdp_iter iter;
- bool ret = false;
- /*
- * Don't support rescheduling, none of the MMU notifiers that funnel
- * into this helper allow blocking; it'd be dead, wasteful code.
- */
- for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
- rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
- ret |= handler(kvm, &iter, range);
- rcu_read_unlock();
- }
- return ret;
- }
- /*
- * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return non-zero
- * if any of the GFNs in the range have been accessed.
- */
- static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
- struct kvm_gfn_range *range)
- {
- u64 new_spte = 0;
- /* If we have a non-accessed entry we don't need to change the pte. */
- if (!is_accessed_spte(iter->old_spte))
- return false;
- new_spte = iter->old_spte;
- if (spte_ad_enabled(new_spte)) {
- new_spte &= ~shadow_accessed_mask;
- } else {
- /*
- * Capture the dirty status of the page, so that it doesn't get
- * lost when the SPTE is marked for access tracking.
- */
- if (is_writable_pte(new_spte))
- kvm_set_pfn_dirty(spte_to_pfn(new_spte));
- new_spte = mark_spte_for_access_track(new_spte);
- }
- tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
- return true;
- }
- bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
- {
- return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
- }
- static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
- struct kvm_gfn_range *range)
- {
- return is_accessed_spte(iter->old_spte);
- }
- bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
- {
- return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
- }
- static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
- struct kvm_gfn_range *range)
- {
- u64 new_spte;
- /* Huge pages aren't expected to be modified without first being zapped. */
- WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
- if (iter->level != PG_LEVEL_4K ||
- !is_shadow_present_pte(iter->old_spte))
- return false;
- /*
- * Note, when changing a read-only SPTE, it's not strictly necessary to
- * zero the SPTE before setting the new PFN, but doing so preserves the
- * invariant that the PFN of a present leaf SPTE can never change.
- * See __handle_changed_spte().
- */
- tdp_mmu_set_spte(kvm, iter, 0);
- if (!pte_write(range->pte)) {
- new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
- pte_pfn(range->pte));
- tdp_mmu_set_spte(kvm, iter, new_spte);
- }
- return true;
- }
- /*
- * Handle the changed_pte MMU notifier for the TDP MMU.
- * data is a pointer to the new pte_t mapping the HVA specified by the MMU
- * notifier.
- * Returns non-zero if a flush is needed before releasing the MMU lock.
- */
- bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
- {
- /*
- * No need to handle the remote TLB flush under RCU protection, the
- * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
- * shadow page. See the WARN on pfn_changed in __handle_changed_spte().
- */
- return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
- }
- /*
- * Remove write access from all SPTEs at or above min_level that map GFNs
- * [start, end). Returns true if an SPTE has been changed and the TLBs need to
- * be flushed.
- */
- static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, int min_level)
- {
- struct tdp_iter iter;
- u64 new_spte;
- bool spte_set = false;
- rcu_read_lock();
- BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
- for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
- retry:
- if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
- continue;
- if (!is_shadow_present_pte(iter.old_spte) ||
- !is_last_spte(iter.old_spte, iter.level) ||
- !(iter.old_spte & PT_WRITABLE_MASK))
- continue;
- new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
- if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
- goto retry;
- spte_set = true;
- }
- rcu_read_unlock();
- return spte_set;
- }
- /*
- * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
- * only affect leaf SPTEs down to min_level.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
- */
- bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
- const struct kvm_memory_slot *slot, int min_level)
- {
- struct kvm_mmu_page *root;
- bool spte_set = false;
- lockdep_assert_held_read(&kvm->mmu_lock);
- for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
- spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
- slot->base_gfn + slot->npages, min_level);
- return spte_set;
- }
- static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
- {
- struct kvm_mmu_page *sp;
- gfp |= __GFP_ZERO;
- sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
- if (!sp)
- return NULL;
- sp->spt = (void *)__get_free_page(gfp);
- if (!sp->spt) {
- kmem_cache_free(mmu_page_header_cache, sp);
- return NULL;
- }
- return sp;
- }
- static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
- struct tdp_iter *iter,
- bool shared)
- {
- struct kvm_mmu_page *sp;
- /*
- * Since we are allocating while under the MMU lock we have to be
- * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
- * reclaim and to avoid making any filesystem callbacks (which can end
- * up invoking KVM MMU notifiers, resulting in a deadlock).
- *
- * If this allocation fails we drop the lock and retry with reclaim
- * allowed.
- */
- sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
- if (sp)
- return sp;
- rcu_read_unlock();
- if (shared)
- read_unlock(&kvm->mmu_lock);
- else
- write_unlock(&kvm->mmu_lock);
- iter->yielded = true;
- sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
- if (shared)
- read_lock(&kvm->mmu_lock);
- else
- write_lock(&kvm->mmu_lock);
- rcu_read_lock();
- return sp;
- }
- static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
- struct kvm_mmu_page *sp, bool shared)
- {
- const u64 huge_spte = iter->old_spte;
- const int level = iter->level;
- int ret, i;
- tdp_mmu_init_child_sp(sp, iter);
- /*
- * No need for atomics when writing to sp->spt since the page table has
- * not been linked in yet and thus is not reachable from any other CPU.
- */
- for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
- sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
- /*
- * Replace the huge spte with a pointer to the populated lower level
- * page table. Since we are making this change without a TLB flush vCPUs
- * will see a mix of the split mappings and the original huge mapping,
- * depending on what's currently in their TLB. This is fine from a
- * correctness standpoint since the translation will be the same either
- * way.
- */
- ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
- if (ret)
- goto out;
- /*
- * tdp_mmu_link_sp() will handle subtracting the huge page we
- * are overwriting from the page stats. But we have to manually update
- * the page stats with the new present child pages.
- */
- kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
- out:
- trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
- return ret;
- }
- static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
- struct kvm_mmu_page *root,
- gfn_t start, gfn_t end,
- int target_level, bool shared)
- {
- struct kvm_mmu_page *sp = NULL;
- struct tdp_iter iter;
- int ret = 0;
- rcu_read_lock();
- /*
- * Traverse the page table splitting all huge pages above the target
- * level into one lower level. For example, if we encounter a 1GB page
- * we split it into 512 2MB pages.
- *
- * Since the TDP iterator uses a pre-order traversal, we are guaranteed
- * to visit an SPTE before ever visiting its children, which means we
- * will correctly recursively split huge pages that are more than one
- * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
- * and then splitting each of those to 512 4KB pages).
- */
- for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
- retry:
- if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
- continue;
- if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
- continue;
- if (!sp) {
- sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
- if (!sp) {
- ret = -ENOMEM;
- trace_kvm_mmu_split_huge_page(iter.gfn,
- iter.old_spte,
- iter.level, ret);
- break;
- }
- if (iter.yielded)
- continue;
- }
- if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
- goto retry;
- sp = NULL;
- }
- rcu_read_unlock();
- /*
- * It's possible to exit the loop having never used the last sp if, for
- * example, a vCPU doing HugePage NX splitting wins the race and
- * installs its own sp in place of the last sp we tried to split.
- */
- if (sp)
- tdp_mmu_free_sp(sp);
- return ret;
- }
- /*
- * Try to split all huge pages mapped by the TDP MMU down to the target level.
- */
- void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- gfn_t start, gfn_t end,
- int target_level, bool shared)
- {
- struct kvm_mmu_page *root;
- int r = 0;
- kvm_lockdep_assert_mmu_lock_held(kvm, shared);
- for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
- r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
- if (r) {
- kvm_tdp_mmu_put_root(kvm, root, shared);
- break;
- }
- }
- }
- /*
- * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
- * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
- * If AD bits are not enabled, this will require clearing the writable bit on
- * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
- * be flushed.
- */
- static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end)
- {
- struct tdp_iter iter;
- u64 new_spte;
- bool spte_set = false;
- rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, start, end) {
- retry:
- if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
- continue;
- if (!is_shadow_present_pte(iter.old_spte))
- continue;
- if (spte_ad_need_write_protect(iter.old_spte)) {
- if (is_writable_pte(iter.old_spte))
- new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
- else
- continue;
- } else {
- if (iter.old_spte & shadow_dirty_mask)
- new_spte = iter.old_spte & ~shadow_dirty_mask;
- else
- continue;
- }
- if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
- goto retry;
- spte_set = true;
- }
- rcu_read_unlock();
- return spte_set;
- }
- /*
- * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
- * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
- * If AD bits are not enabled, this will require clearing the writable bit on
- * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
- * be flushed.
- */
- bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
- const struct kvm_memory_slot *slot)
- {
- struct kvm_mmu_page *root;
- bool spte_set = false;
- lockdep_assert_held_read(&kvm->mmu_lock);
- for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
- spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
- slot->base_gfn + slot->npages);
- return spte_set;
- }
- /*
- * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
- * set in mask, starting at gfn. The given memslot is expected to contain all
- * the GFNs represented by set bits in the mask. If AD bits are enabled,
- * clearing the dirty status will involve clearing the dirty bit on each SPTE
- * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
- */
- static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t gfn, unsigned long mask, bool wrprot)
- {
- struct tdp_iter iter;
- u64 new_spte;
- rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
- gfn + BITS_PER_LONG) {
- if (!mask)
- break;
- if (iter.level > PG_LEVEL_4K ||
- !(mask & (1UL << (iter.gfn - gfn))))
- continue;
- mask &= ~(1UL << (iter.gfn - gfn));
- if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
- if (is_writable_pte(iter.old_spte))
- new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
- else
- continue;
- } else {
- if (iter.old_spte & shadow_dirty_mask)
- new_spte = iter.old_spte & ~shadow_dirty_mask;
- else
- continue;
- }
- tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
- }
- rcu_read_unlock();
- }
- /*
- * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
- * set in mask, starting at gfn. The given memslot is expected to contain all
- * the GFNs represented by set bits in the mask. If AD bits are enabled,
- * clearing the dirty status will involve clearing the dirty bit on each SPTE
- * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
- */
- void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn, unsigned long mask,
- bool wrprot)
- {
- struct kvm_mmu_page *root;
- lockdep_assert_held_write(&kvm->mmu_lock);
- for_each_tdp_mmu_root(kvm, root, slot->as_id)
- clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
- }
- static void zap_collapsible_spte_range(struct kvm *kvm,
- struct kvm_mmu_page *root,
- const struct kvm_memory_slot *slot)
- {
- gfn_t start = slot->base_gfn;
- gfn_t end = start + slot->npages;
- struct tdp_iter iter;
- int max_mapping_level;
- rcu_read_lock();
- for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
- retry:
- if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
- continue;
- if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
- !is_shadow_present_pte(iter.old_spte))
- continue;
- /*
- * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
- * a large page size, then its parent would have been zapped
- * instead of stepping down.
- */
- if (is_last_spte(iter.old_spte, iter.level))
- continue;
- /*
- * If iter.gfn resides outside of the slot, i.e. the page for
- * the current level overlaps but is not contained by the slot,
- * then the SPTE can't be made huge. More importantly, trying
- * to query that info from slot->arch.lpage_info will cause an
- * out-of-bounds access.
- */
- if (iter.gfn < start || iter.gfn >= end)
- continue;
- max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
- iter.gfn, PG_LEVEL_NUM);
- if (max_mapping_level < iter.level)
- continue;
- /* Note, a successful atomic zap also does a remote TLB flush. */
- if (tdp_mmu_zap_spte_atomic(kvm, &iter))
- goto retry;
- }
- rcu_read_unlock();
- }
- /*
- * Zap non-leaf SPTEs (and free their associated page tables) which could
- * be replaced by huge pages, for GFNs within the slot.
- */
- void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot)
- {
- struct kvm_mmu_page *root;
- lockdep_assert_held_read(&kvm->mmu_lock);
- for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
- zap_collapsible_spte_range(kvm, root, slot);
- }
- /*
- * Removes write access on the last level SPTE mapping this GFN and unsets the
- * MMU-writable bit to ensure future writes continue to be intercepted.
- * Returns true if an SPTE was set and a TLB flush is needed.
- */
- static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t gfn, int min_level)
- {
- struct tdp_iter iter;
- u64 new_spte;
- bool spte_set = false;
- BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
- rcu_read_lock();
- for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
- if (!is_shadow_present_pte(iter.old_spte) ||
- !is_last_spte(iter.old_spte, iter.level))
- continue;
- new_spte = iter.old_spte &
- ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
- if (new_spte == iter.old_spte)
- break;
- tdp_mmu_set_spte(kvm, &iter, new_spte);
- spte_set = true;
- }
- rcu_read_unlock();
- return spte_set;
- }
- /*
- * Removes write access on the last level SPTE mapping this GFN and unsets the
- * MMU-writable bit to ensure future writes continue to be intercepted.
- * Returns true if an SPTE was set and a TLB flush is needed.
- */
- bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
- int min_level)
- {
- struct kvm_mmu_page *root;
- bool spte_set = false;
- lockdep_assert_held_write(&kvm->mmu_lock);
- for_each_tdp_mmu_root(kvm, root, slot->as_id)
- spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
- return spte_set;
- }
- /*
- * Return the level of the lowest level SPTE added to sptes.
- * That SPTE may be non-present.
- *
- * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
- */
- int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
- int *root_level)
- {
- struct tdp_iter iter;
- struct kvm_mmu *mmu = vcpu->arch.mmu;
- gfn_t gfn = addr >> PAGE_SHIFT;
- int leaf = -1;
- *root_level = vcpu->arch.mmu->root_role.level;
- tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
- leaf = iter.level;
- sptes[leaf] = iter.old_spte;
- }
- return leaf;
- }
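- /*
- * Illustrative sketch, not part of the upstream file: a caller is expected to
- * provide an array indexed by level and consume the recorded SPTEs from
- * *root_level down to the returned leaf level, roughly:
- *
- * u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
- * int root_level, leaf;
- *
- * kvm_tdp_mmu_walk_lockless_begin();
- * leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
- * kvm_tdp_mmu_walk_lockless_end();
- */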
- /*
- * Returns the last level spte pointer of the shadow page walk for the given
- * gpa, and sets *spte to the spte value. This spte may be non-present. If no
- * walk could be performed, returns NULL and *spte does not contain valid data.
- *
- * Contract:
- * - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
- * - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
- *
- * WARNING: This function is only intended to be called during fast_page_fault.
- */
- u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
- u64 *spte)
- {
- struct tdp_iter iter;
- struct kvm_mmu *mmu = vcpu->arch.mmu;
- gfn_t gfn = addr >> PAGE_SHIFT;
- tdp_ptep_t sptep = NULL;
- tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
- *spte = iter.old_spte;
- sptep = iter.sptep;
- }
- /*
- * Perform the rcu_dereference to get the raw spte pointer value since
- * we are passing it up to fast_page_fault, which is shared with the
- * legacy MMU and thus does not retain the TDP MMU-specific __rcu
- * annotation.
- *
- * This is safe since fast_page_fault obeys the contracts of this
- * function as well as all TDP MMU contracts around modifying SPTEs
- * outside of mmu_lock.
- */
- return rcu_dereference(sptep);
- }