Merge branch 'kvm-master' into HEAD
Merge AMD fixes before doing more development work.
@@ -247,7 +247,6 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mmio_mask;
 static u64 __read_mostly shadow_mmio_value;
 static u64 __read_mostly shadow_mmio_access_mask;
 static u64 __read_mostly shadow_present_mask;
@@ -334,19 +333,19 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
-	BUG_ON((mmio_mask & mmio_value) != mmio_value);
+	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
+	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
 	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
-	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 	shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
 static bool is_mmio_spte(u64 spte)
 {
-	return (spte & shadow_mmio_mask) == shadow_mmio_value;
+	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
 }
 
 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
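The net effect of this hunk: MMIO SPTEs are now identified purely by the fixed special bits rather than by a configurable, per-vendor shadow_mmio_mask, which is why both the mask variable and the mmio_mask parameter can be dropped. Below is a minimal userspace sketch (not kernel code) of the new scheme; the bit values for SPTE_SPECIAL_MASK and SPTE_MMIO_MASK are assumptions mirroring mmu.c of this era, where both are 3ULL << 52:

#include <stdint.h>
#include <stdio.h>

/* Assumed values, mirroring mmu.c at this point in history. */
#define SPTE_SPECIAL_MASK (3ULL << 52)
#define SPTE_MMIO_MASK    (3ULL << 52)

static uint64_t shadow_mmio_value;

/* Models the new two-argument kvm_mmu_set_mmio_spte_mask(). */
static void set_mmio_spte_mask(uint64_t mmio_value)
{
        shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
}

/* Models the new is_mmio_spte(): only the special bits are checked. */
static int is_mmio_spte(uint64_t spte)
{
        return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
}

int main(void)
{
        set_mmio_spte_mask((1ULL << 51) | 1);   /* reserved PA bit + present */
        printf("%d\n", is_mmio_spte(shadow_mmio_value)); /* 1: tagged MMIO */
        printf("%d\n", is_mmio_spte(1ULL << 51));        /* 0: untagged */
        return 0;
}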
@@ -569,7 +568,6 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	shadow_dirty_mask = 0;
 	shadow_nx_mask = 0;
 	shadow_x_mask = 0;
-	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
 
@@ -586,16 +584,15 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * the most significant bits of legal physical address space.
 	 */
 	shadow_nonpresent_or_rsvd_mask = 0;
-	low_phys_bits = boot_cpu_data.x86_cache_bits;
-	if (boot_cpu_data.x86_cache_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len) {
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
+	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
+	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
+			  52 - shadow_nonpresent_or_rsvd_mask_len)) {
+		low_phys_bits = boot_cpu_data.x86_cache_bits
+			- shadow_nonpresent_or_rsvd_mask_len;
 		shadow_nonpresent_or_rsvd_mask =
-			rsvd_bits(boot_cpu_data.x86_cache_bits -
-				  shadow_nonpresent_or_rsvd_mask_len,
-				  boot_cpu_data.x86_cache_bits - 1);
-		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
-	} else
-		WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
+			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
+	}
 
 	shadow_nonpresent_or_rsvd_lower_gfn_mask =
 		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
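To make the reworked L1TF logic concrete, here is a standalone sketch that replays the computation above for a hypothetical L1TF-affected CPU with 46 cache and physical address bits. shadow_nonpresent_or_rsvd_mask_len is 5 in mmu.c; rsvd_bits() and GENMASK_ULL() are re-implemented here with the kernel's semantics, and the WARN_ON_ONCE condition is reduced to its plain boolean form:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Kernel semantics: a mask with bits s..e (inclusive) set. */
static uint64_t rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
        const int mask_len = 5;    /* shadow_nonpresent_or_rsvd_mask_len */
        int cache_bits = 46, phys_bits = 46;
        int has_l1tf = 1;          /* boot_cpu_has_bug(X86_BUG_L1TF) */
        uint64_t rsvd_mask = 0;
        int low_phys_bits = phys_bits;

        /* !WARN_ON_ONCE(cache_bits >= 52 - len) == (cache_bits < 52 - len) */
        if (has_l1tf && cache_bits < 52 - mask_len) {
                low_phys_bits = cache_bits - mask_len;   /* 41 */
                rsvd_mask = rsvd_bits(low_phys_bits, cache_bits - 1);
        }
        printf("nonpresent_or_rsvd mask: %#llx\n",       /* bits 41..45 */
               (unsigned long long)rsvd_mask);
        printf("lower gfn mask:          %#llx\n",       /* bits 12..40 */
               (unsigned long long)GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT));
        return 0;
}

Note the behavioral change the diff encodes: previously the mask was installed for any CPU whose cache bits left room for it, with a WARN on L1TF-affected CPUs where it did not fit; now the mask is installed only when X86_BUG_L1TF is actually set, and unaffected CPUs simply keep x86_phys_bits.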
@@ -6134,27 +6131,18 @@ static void kvm_set_mmio_spte_mask(void)
 	u64 mask;
 
 	/*
-	 * Set the reserved bits and the present bit of an paging-structure
-	 * entry to generate page fault with PFER.RSV = 1.
+	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
+	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
+	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
+	 * 52-bit physical addresses then there are no reserved PA bits in the
+	 * PTEs and so the reserved PA approach must be disabled.
 	 */
+	if (shadow_phys_bits < 52)
+		mask = BIT_ULL(51) | PT_PRESENT_MASK;
+	else
+		mask = 0;
 
-	/*
-	 * Mask the uppermost physical address bit, which would be reserved as
-	 * long as the supported physical address width is less than 52.
-	 */
-	mask = 1ull << 51;
-
-	/* Set the present bit. */
-	mask |= 1ull;
-
-	/*
-	 * If reserved bit is not supported, clear the present bit to disable
-	 * mmio page fault.
-	 */
-	if (shadow_phys_bits == 52)
-		mask &= ~1ull;
-
-	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
 }
 
 static bool get_nx_auto_mode(void)
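As a quick illustration of the simplified policy (not part of the commit), the hypothetical helper below returns the mask that would be handed to kvm_mmu_set_mmio_spte_mask() for a given shadow_phys_bits; PT_PRESENT_MASK is bit 0:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)      (1ULL << (n))
#define PT_PRESENT_MASK BIT_ULL(0)

/* Hypothetical helper condensing the if/else in the hunk above. */
static uint64_t mmio_mask_for(int shadow_phys_bits)
{
        return shadow_phys_bits < 52 ? BIT_ULL(51) | PT_PRESENT_MASK : 0;
}

int main(void)
{
        /* 46-bit PA: bit 51 is reserved, so MMIO #PFs get PFEC.RSVD=1. */
        printf("%#llx\n", (unsigned long long)mmio_mask_for(46)); /* 0x8000000000001 */
        /* 52-bit PA: no reserved bits, the trick is disabled entirely. */
        printf("%#llx\n", (unsigned long long)mmio_mask_for(52)); /* 0 */
        return 0;
}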