Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "Small release, the most interesting stuff is x86 nested virt
  improvements.

  x86:
   - userspace can now hide nested VMX features from guests
   - nested VMX can now run Hyper-V in a guest
   - support for AVX512_4VNNIW and AVX512_FMAPS in KVM
   - infrastructure support for virtual Intel GPUs.

  PPC:
   - support for KVM guests on POWER9
   - improved support for interrupt polling
   - optimizations and cleanups.

  s390:
   - two small optimizations, more stuff is in flight and will be in 4.11.

  ARM:
   - support for the GICv3 ITS on 32bit platforms"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (94 commits)
  arm64: KVM: pmu: Reset PMSELR_EL0.SEL to a sane value before entering the guest
  KVM: arm/arm64: timer: Check for properly initialized timer on init
  KVM: arm/arm64: vgic-v2: Limit ITARGETSR bits to number of VCPUs
  KVM: x86: Handle the kthread worker using the new API
  KVM: nVMX: invvpid handling improvements
  KVM: nVMX: check host CR3 on vmentry and vmexit
  KVM: nVMX: introduce nested_vmx_load_cr3 and call it on vmentry
  KVM: nVMX: propagate errors from prepare_vmcs02
  KVM: nVMX: fix CR3 load if L2 uses PAE paging and EPT
  KVM: nVMX: load GUEST_EFER after GUEST_CR0 during emulated VM-entry
  KVM: nVMX: generate MSR_IA32_CR{0,4}_FIXED1 from guest CPUID
  KVM: nVMX: fix checks on CR{0,4} during virtual VMX operation
  KVM: nVMX: support restore of VMX capability MSRs
  KVM: nVMX: generate non-true VMX MSRs based on true versions
  KVM: x86: Do not clear RFLAGS.TF when a singlestep trap occurs.
  KVM: x86: Add kvm_skip_emulated_instruction and use it.
  KVM: VMX: Move skip_emulated_instruction out of nested_vmx_check_vmcs12
  KVM: VMX: Reorder some skip_emulated_instruction calls
  KVM: x86: Add a return value to kvm_emulate_cpuid
  KVM: PPC: Book3S: Move prototypes for KVM functions into kvm_ppc.h
  ...
@@ -70,7 +70,9 @@
 #define HPTE_V_SSIZE_SHIFT	62
 #define HPTE_V_AVPN_SHIFT	7
+#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
 #define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
 #define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
 #define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
 #define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
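The two masks added above are related to the existing AVPN mask in a way that is easy to sanity-check: HPTE_V_COMMON_BITS keeps only the low 52 bits of the first dword (the part of the layout the old and new formats share), and HPTE_V_AVPN_3_0 is exactly the old AVPN mask trimmed to those common bits. A minimal user-space sketch (not part of the patch; plain ULL literals stand in for ASM_CONST) that verifies this:

#include <assert.h>

/* Values copied from the hunk above. */
#define HPTE_V_COMMON_BITS	0x000fffffffffffffULL
#define HPTE_V_AVPN		0x3fffffffffffff80ULL
#define HPTE_V_AVPN_3_0		0x000fffffffffff80ULL

int main(void)
{
	/* New-format AVPN == old AVPN restricted to the shared low bits. */
	assert((HPTE_V_AVPN & HPTE_V_COMMON_BITS) == HPTE_V_AVPN_3_0);
	return 0;
}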
@@ -80,14 +82,16 @@
 #define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

 /*
- * ISA 3.0 have a different HPTE format.
+ * ISA 3.0 has a different HPTE format.
  */
 #define HPTE_R_3_0_SSIZE_SHIFT	58
+#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
 #define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
 #define HPTE_R_TS		ASM_CONST(0x4000000000000000)
 #define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
 #define HPTE_R_RPN_SHIFT	12
 #define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
+#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
 #define HPTE_R_N		ASM_CONST(0x0000000000000004)
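The second-dword additions can be checked the same way: HPTE_R_PPP is simply HPTE_R_PP0 and HPTE_R_PP combined into one mask, and HPTE_R_RPN_3_0 is a shortened RPN mask that leaves bits 59:58 free for the B field that ISA v3.0 moves into this dword (hence HPTE_R_3_0_SSIZE_SHIFT = 58). Another small stand-alone check, under the same assumptions as the sketch above:

#include <assert.h>

#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ULL << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		0x8000000000000000ULL
#define HPTE_R_RPN		0x0ffffffffffff000ULL
#define HPTE_R_RPN_3_0		0x01fffffffffff000ULL
#define HPTE_R_PP		0x0000000000000003ULL
#define HPTE_R_PPP		0x8000000000000003ULL

int main(void)
{
	/* PPP is the union of the split PP0 and PP fields. */
	assert((HPTE_R_PP0 | HPTE_R_PP) == HPTE_R_PPP);
	/* The v3.0 RPN is a subset of the old RPN ... */
	assert((HPTE_R_RPN_3_0 & ~HPTE_R_RPN) == 0);
	/* ... and does not overlap the relocated B (segment size) field. */
	assert((HPTE_R_RPN_3_0 & HPTE_R_3_0_SSIZE_MASK) == 0);
	return 0;
}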
@@ -316,11 +320,42 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
 	 */
 	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
 	v <<= HPTE_V_AVPN_SHIFT;
-	if (!cpu_has_feature(CPU_FTR_ARCH_300))
-		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
 	return v;
 }

+/*
+ * ISA v3.0 defines a new HPTE format, which differs from the old
+ * format in having smaller AVPN and ARPN fields, and the B field
+ * in the second dword instead of the first.
+ */
+static inline unsigned long hpte_old_to_new_v(unsigned long v)
+{
+	/* trim AVPN, drop B */
+	return v & HPTE_V_COMMON_BITS;
+}
+
+static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
+{
+	/* move B field from 1st to 2nd dword, trim ARPN */
+	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
+		(((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
+}
+
+static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
+{
+	/* insert B field */
+	return (v & HPTE_V_COMMON_BITS) |
+		((r & HPTE_R_3_0_SSIZE_MASK) <<
+		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
+}
+
+static inline unsigned long hpte_new_to_old_r(unsigned long r)
+{
+	/* clear out B field */
+	return r & ~HPTE_R_3_0_SSIZE_MASK;
+}
+
 /*
  * This function sets the AVPN and L fields of the HPTE appropriately
  * using the base page size and actual page size.
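The four helpers above are inverses of each other as long as the bits they trim were zero to begin with, i.e. the AVPN fits in the common bits and the old-format RPN stays below bit 58. A self-contained round-trip sketch (user-space C, 64-bit unsigned long assumed, helper bodies copied from the hunk above, example values made up for illustration):

#include <assert.h>

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_COMMON_BITS	0x000fffffffffffffULL
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ULL << HPTE_R_3_0_SSIZE_SHIFT)

static unsigned long hpte_old_to_new_v(unsigned long v)
{
	return v & HPTE_V_COMMON_BITS;		/* trim AVPN, drop B */
}

static unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
		((v >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field back into the first dword */
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static unsigned long hpte_new_to_old_r(unsigned long r)
{
	return r & ~HPTE_R_3_0_SSIZE_MASK;	/* clear out B field */
}

int main(void)
{
	/* Old-format first dword: B = 1 (1TB segment) in bits 63:62,
	 * an AVPN that fits within the common bits, valid bit set. */
	unsigned long v = (1UL << HPTE_V_SSIZE_SHIFT) | 0x123456789081UL;
	/* Old-format second dword with an RPN well below bit 58. */
	unsigned long r = 0x12345196UL;

	unsigned long nv = hpte_old_to_new_v(v);	/* B dropped */
	unsigned long nr = hpte_old_to_new_r(v, r);	/* B now in 59:58 */

	assert(nr == (r | (1UL << HPTE_R_3_0_SSIZE_SHIFT)));
	assert(hpte_new_to_old_v(nv, nr) == v);		/* round trip */
	assert(hpte_new_to_old_r(nr) == r);
	return 0;
}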
@@ -341,12 +376,8 @@ static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
  * aligned for the requested page size
  */
 static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
-					  int actual_psize, int ssize)
+					  int actual_psize)
 {
-
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;
-
 	/* A 4K page needs no special encoding */
 	if (actual_psize == MMU_PAGE_4K)
 		return pa & HPTE_R_RPN;
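With ssize gone from hpte_encode_r(), both encode helpers now always produce the old format, and the conversion happens once, at the point where an entry is actually written to or read from the hash table. Roughly, a caller on an ISA v3.0 machine would follow the pattern below; this is an illustrative stand-alone sketch, not the literal kernel code, with a boolean standing in for cpu_has_feature(CPU_FTR_ARCH_300) and made-up values standing in for the results of hpte_encode_v()/hpte_encode_r():

#include <stdbool.h>
#include <stdio.h>

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_COMMON_BITS	0x000fffffffffffffULL
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ULL << HPTE_R_3_0_SSIZE_SHIFT)

/* Helper bodies as in the patch. */
static unsigned long hpte_old_to_new_v(unsigned long v)
{
	return v & HPTE_V_COMMON_BITS;
}

static unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
		((v >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

/* Stand-in for cpu_has_feature(CPU_FTR_ARCH_300). */
static const bool arch_300 = true;

int main(void)
{
	/* Pretend these came from hpte_encode_v()/hpte_encode_r():
	 * old-format words for a 1TB-segment (B = 1) mapping. */
	unsigned long v = (1UL << HPTE_V_SSIZE_SHIFT) | 0x123456789081UL;
	unsigned long r = 0x12345196UL;

	if (arch_300) {
		/* Convert only here, just before touching the HPT. */
		r = hpte_old_to_new_r(v, r);
		v = hpte_old_to_new_v(v);
	}
	printf("v = %#lx, r = %#lx\n", v, r);
	return 0;
}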