Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:

 - ARM: GICv3 ITS emulation and various fixes. Removal of the old VGIC
   implementation.

 - s390: support for trapping software breakpoints, nested
   virtualization (vSIE), the STHYI opcode, initial extensions for CPU
   model support.

 - MIPS: support for MIPS64 hosts (32-bit guests only) and lots of
   cleanups, preliminary to this and the upcoming support for hardware
   virtualization extensions.

 - x86: support for execute-only mappings in nested EPT; reduced vmexit
   latency for TSC deadline timer (by about 30%) on Intel hosts;
   support for more than 255 vCPUs.

 - PPC: bugfixes.

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (302 commits)
  KVM: PPC: Introduce KVM_CAP_PPC_HTM
  MIPS: Select HAVE_KVM for MIPS64_R{2,6}
  MIPS: KVM: Reset CP0_PageMask during host TLB flush
  MIPS: KVM: Fix ptr->int cast via KVM_GUEST_KSEGX()
  MIPS: KVM: Sign extend MFC0/RDHWR results
  MIPS: KVM: Fix 64-bit big endian dynamic translation
  MIPS: KVM: Fail if ebase doesn't fit in CP0_EBase
  MIPS: KVM: Use 64-bit CP0_EBase when appropriate
  MIPS: KVM: Set CP0_Status.KX on MIPS64
  MIPS: KVM: Make entry code MIPS64 friendly
  MIPS: KVM: Use kmap instead of CKSEG0ADDR()
  MIPS: KVM: Use virt_to_phys() to get commpage PFN
  MIPS: Fix definition of KSEGX() for 64-bit
  KVM: VMX: Add VMCS to CPU's loaded VMCSs before VMPTRLD
  kvm: x86: nVMX: maintain internal copy of current VMCS
  KVM: PPC: Book3S HV: Save/restore TM state in H_CEDE
  KVM: PPC: Book3S HV: Pull out TM state save/restore into separate procedures
  KVM: arm64: vgic-its: Simplify MAPI error handling
  KVM: arm64: vgic-its: Make vgic_its_cmd_handle_mapi similar to other handlers
  KVM: arm64: vgic-its: Turn device_id validation into generic ID validation
  ...
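The x86 portion of the mmu.c diff below wires a new "present" mask through the MMU so that EPT can emit execute-only shadow PTEs: kvm_mmu_set_mask_ptes() grows a p_mask parameter, and set_spte() ORs in shadow_present_mask instead of hard-coding PT_PRESENT_MASK. As a rough sketch of the caller side (not part of this mmu.c diff; the exact vmx.c call site and formatting are paraphrased), a host that supports exec-only EPT passes 0 as the present mask:

	/* VMX hardware setup with EPT enabled (paraphrased sketch). */
	kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
		enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,	/* accessed */
		enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,	/* dirty */
		0ull,						/* nx */
		VMX_EPT_EXECUTABLE_MASK,			/* x */
		cpu_has_vmx_ept_execute_only() ? 0ull
					       : VMX_EPT_READABLE_MASK); /* p_mask */

With p_mask == 0, "present" is no longer a single architectural bit, which is what the rest of the diff accommodates.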
@@ -176,6 +176,7 @@ static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
 static u64 __read_mostly shadow_mmio_mask;
+static u64 __read_mostly shadow_present_mask;

 static void mmu_spte_set(u64 *sptep, u64 spte);
 static void mmu_free_roots(struct kvm_vcpu *vcpu);
@@ -283,13 +284,14 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 }

 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
+	shadow_present_mask = p_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

@@ -305,7 +307,7 @@ static int is_nx(struct kvm_vcpu *vcpu)

 static int is_shadow_present_pte(u64 pte)
 {
-	return pte & PT_PRESENT_MASK && !is_mmio_spte(pte);
+	return (pte & 0xFFFFFFFFull) && !is_mmio_spte(pte);
 }

 static int is_large_pte(u64 pte)
@@ -524,7 +526,7 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
 }

 /* Rules for using mmu_spte_update:
- * Update the state bits, it means the mapped pfn is not changged.
+ * Update the state bits, it means the mapped pfn is not changed.
  *
  * Whenever we overwrite a writable spte with a read-only one we
  * should flush remote TLBs. Otherwise rmap_write_protect
@@ -2246,10 +2248,9 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
 {
 	u64 spte;

-	BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK ||
-		     VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
+	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);

-	spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
+	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
 	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;

 	mmu_spte_set(sptep, spte);
@@ -2516,13 +2517,19 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
 		    bool can_unsync, bool host_writable)
 {
-	u64 spte;
+	u64 spte = 0;
 	int ret = 0;

 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
 		return 0;

-	spte = PT_PRESENT_MASK;
+	/*
+	 * For the EPT case, shadow_present_mask is 0 if hardware
+	 * supports exec-only page table entries.  In that case,
+	 * ACC_USER_MASK and shadow_user_mask are used to represent
+	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
+	 */
+	spte |= shadow_present_mask;
 	if (!speculative)
 		spte |= shadow_accessed_mask;

@@ -3190,7 +3197,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		MMU_WARN_ON(VALID_PAGE(root));
 		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
 			pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
-			if (!is_present_gpte(pdptr)) {
+			if (!(pdptr & PT_PRESENT_MASK)) {
 				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
 			}
@@ -3915,9 +3922,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
 				 * clearer.
 				 */
 				smap = cr4_smap && u && !uf && !ff;
-			} else
-				/* Not really needed: no U/S accesses on ept */
-				u = 1;
+			}

 			fault = (ff && !x) || (uf && !u) || (wf && !w) ||
 				(smapf && smap);
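A self-contained illustration of the is_shadow_present_pte() change above (standalone C, not kernel code; the two mask values mirror the x86/VMX definitions, and the is_mmio_spte() exclusion is omitted for brevity). An exec-only EPT SPTE has only the X bit set, so the old PT_PRESENT_MASK test would call it not present, while the new low-32-bit test classifies it correctly:

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK		(1ull << 0)	/* x86 PTE P bit / EPT read */
#define VMX_EPT_EXECUTABLE_MASK	(1ull << 2)	/* EPT X bit */

/* Old check: only looks at bit 0. */
static int present_old(uint64_t spte)
{
	return (spte & PT_PRESENT_MASK) != 0;
}

/* New check: any of the low 32 bits marks the SPTE as present. */
static int present_new(uint64_t spte)
{
	return (spte & 0xFFFFFFFFull) != 0;
}

int main(void)
{
	uint64_t exec_only = VMX_EPT_EXECUTABLE_MASK;	/* exec-only SPTE */

	printf("old: %d  new: %d\n", present_old(exec_only),
	       present_new(exec_only));	/* prints "old: 0  new: 1" */
	return 0;
}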