KVM: X86: Use vcpu->arch.walk_mmu for kvm_mmu_invlpg()
commit 05b29633c7a956d5675f5fbba70db0d26aa5e73e upstream. INVLPG operates on guest virtual addresses, which are represented by vcpu->arch.walk_mmu. In nested virtualization scenarios, kvm_mmu_invlpg() was using the wrong MMU structure; if L2's invlpg were emulated by L0 (in practice, this hardly ever happens) when nested two-dimensional paging is enabled, the call to ->tlb_flush_gva() would be skipped and the hardware TLB entry would not be invalidated. Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com> Message-Id: <20211124122055.64424-5-jiangshanlai@gmail.com> Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:

committed by
Greg Kroah-Hartman

parent
c71b5f37b5
commit
d4af6d9749
@@ -5152,7 +5152,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
|
|||||||
|
|
||||||
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
|
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
|
||||||
{
|
{
|
||||||
kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
|
kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
|
||||||
++vcpu->stat.invlpg;
|
++vcpu->stat.invlpg;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
|
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
|
||||||
|
Reference in New Issue
Block a user