kvm: x86: Move kvm_set_mmio_spte_mask() from x86.c to mmu.c
As a prerequisite to fixing several SPTE reserved-bit calculation errors caused by MKTME, kvm_set_mmio_spte_mask() needs to use a local static variable defined in mmu.c.

Also move the call site of kvm_set_mmio_spte_mask() from kvm_arch_init() to kvm_mmu_module_init() so that kvm_set_mmio_spte_mask() can be made static.

Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5998,6 +5998,35 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static void kvm_set_mmio_spte_mask(void)
+{
+	u64 mask;
+	int maxphyaddr = boot_cpu_data.x86_phys_bits;
+
+	/*
+	 * Set the reserved bits and the present bit of a paging-structure
+	 * entry to generate page fault with PFER.RSV = 1.
+	 */
+
+	/*
+	 * Mask the uppermost physical address bit, which would be reserved as
+	 * long as the supported physical address width is less than 52.
+	 */
+	mask = 1ull << 51;
+
+	/* Set the present bit. */
+	mask |= 1ull;
+
+	/*
+	 * If reserved bit is not supported, clear the present bit to disable
+	 * mmio page fault.
+	 */
+	if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
+		mask &= ~1ull;
+
+	kvm_mmu_set_mmio_spte_mask(mask, mask);
+}
+
 int kvm_mmu_module_init(void)
 {
 	int ret = -ENOMEM;
@@ -6014,6 +6043,8 @@ int kvm_mmu_module_init(void)
 
 	kvm_mmu_reset_all_pte_masks();
 
+	kvm_set_mmio_spte_mask();
+
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
 					    0, SLAB_ACCOUNT, NULL);
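For context, the mask logic being moved can be exercised in isolation. The following standalone sketch (hypothetical, not part of the patch; mmio_spte_mask() is an illustrative name) mirrors the computation and shows why maxphyaddr == 52 clears the present bit: with a 52-bit physical address width, bit 51 is a legal address bit rather than a reserved one, so a reserved-bit fault can never be generated and MMIO page-fault caching has to be disabled instead.

/* Standalone sketch of the mask computation above; assumes a 64-bit host. */
#include <stdint.h>
#include <stdio.h>

static uint64_t mmio_spte_mask(int maxphyaddr)
{
	/*
	 * Bit 51 is the uppermost physical address bit; it is reserved in
	 * paging-structure entries whenever MAXPHYADDR < 52, so setting it
	 * together with the present bit makes such an SPTE fault with the
	 * reserved-bit error code set.
	 */
	uint64_t mask = 1ull << 51;

	/* Set the present bit. */
	mask |= 1ull;

	/*
	 * With MAXPHYADDR == 52 no address bit is reserved, so clear the
	 * present bit to disable the MMIO page-fault fast path.
	 */
	if (maxphyaddr == 52)
		mask &= ~1ull;

	return mask;
}

int main(void)
{
	printf("maxphyaddr 46: %#llx\n",
	       (unsigned long long)mmio_spte_mask(46)); /* 0x8000000000001 */
	printf("maxphyaddr 52: %#llx\n",
	       (unsigned long long)mmio_spte_mask(52)); /* 0x8000000000000 */
	return 0;
}

Once the computation lives next to the other mask setup in mmu.c, it can consume the reserved-bit state that later MKTME fixes adjust there, which is the point of the move.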