Merge tag 'kvm-arm-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm updates for 5.3

- Add support for chained PMU counters in guests
- Improve SError handling
- Handle Neoverse N1 erratum #1349291
- Allow side-channel mitigation status to be migrated
- Standardise most AArch64 system register accesses to msr_s/mrs_s
- Fix host MPIDR corruption on 32bit
Author: Paolo Bonzini
Date:   2019-07-11 15:14:16 +02:00


@@ -1412,7 +1412,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
 	}
 	if (unlikely(!(evmcs->hv_clean_fields &
-		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
 		vmcs12->exception_bitmap = evmcs->exception_bitmap;
 	}
@@ -1452,7 +1452,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
 	}
 	if (unlikely(!(evmcs->hv_clean_fields &
-		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
 		vmcs12->pin_based_vm_exec_control =
 			evmcs->pin_based_vm_exec_control;
 		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
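The two hunks above correct which hv_clean_fields bit gates which copy: the exception bitmap is governed by the EXCPN group and the pin-based/VM-exit controls by CONTROL_GRP1, so testing CONTROL_PROC and HOST_GRP1 could leave vmcs12 stale when only the correct group had been dirtied by the guest. A minimal sketch of the pattern in use; the helper name is hypothetical, while struct hv_enlightened_vmcs and hv_clean_fields are the real names referenced above:

static inline bool evmcs_field_group_dirty(const struct hv_enlightened_vmcs *evmcs,
					   u32 clean_field_bit)
{
	/* A field group is copied into vmcs12 only when its "clean" bit is
	 * not set, i.e. the guest marked that group as modified. */
	return !(evmcs->hv_clean_fields & clean_field_bit);
}
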
@@ -5421,14 +5421,16 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	struct vmcs12 *vmcs12;
 	struct kvm_nested_state kvm_state = {
 		.flags = 0,
-		.format = 0,
+		.format = KVM_STATE_NESTED_FORMAT_VMX,
 		.size = sizeof(kvm_state),
-		.vmx.vmxon_pa = -1ull,
-		.vmx.vmcs_pa = -1ull,
+		.hdr.vmx.vmxon_pa = -1ull,
+		.hdr.vmx.vmcs12_pa = -1ull,
 	};
+	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
+		&user_kvm_nested_state->data.vmx[0];
 	if (!vcpu)
-		return kvm_state.size + 2 * VMCS12_SIZE;
+		return kvm_state.size + sizeof(*user_vmx_nested_state);
 	vmx = to_vmx(vcpu);
 	vmcs12 = get_vmcs12(vcpu);
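The .hdr.vmx and data.vmx[0] accesses introduced above come from the reworked kvm_nested_state uapi layout (the !vcpu early return is the path used to answer the KVM_CAP_NESTED_STATE maximum-size query). Below is a sketch of that layout as implied by the hunks in this file; field names are taken from the diff, while the 4096-byte blob size (mirroring VMCS12_SIZE) and the padding are illustrative rather than the exact header definition:

#include <linux/types.h>

struct kvm_vmx_nested_state_hdr {
	__u64 vmxon_pa;
	__u64 vmcs12_pa;
	struct {
		__u16 flags;		/* KVM_STATE_NESTED_SMM_{VMXON,GUEST_MODE} */
	} smm;
};

struct kvm_vmx_nested_state_data {
	__u8 vmcs12[4096];		/* illustrative size */
	__u8 shadow_vmcs12[4096];	/* illustrative size */
};

struct kvm_nested_state {
	__u16 flags;			/* e.g. KVM_STATE_NESTED_GUEST_MODE, _EVMCS */
	__u16 format;			/* KVM_STATE_NESTED_FORMAT_VMX */
	__u32 size;			/* header plus however much of data is valid */
	union {
		struct kvm_vmx_nested_state_hdr vmx;
		__u8 pad[120];		/* illustrative padding */
	} hdr;
	union {
		struct kvm_vmx_nested_state_data vmx[0];
	} data;
};
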
@@ -5438,23 +5440,23 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	if (nested_vmx_allowed(vcpu) &&
 	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
-		kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
-		kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
 		if (vmx_has_valid_vmcs12(vcpu)) {
-			kvm_state.size += VMCS12_SIZE;
+			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
 			if (is_guest_mode(vcpu) &&
 			    nested_cpu_has_shadow_vmcs(vmcs12) &&
 			    vmcs12->vmcs_link_pointer != -1ull)
-				kvm_state.size += VMCS12_SIZE;
+				kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
 		}
 		if (vmx->nested.smm.vmxon)
-			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
 		if (vmx->nested.smm.guest_mode)
-			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
 		if (is_guest_mode(vcpu)) {
 			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
@@ -5490,16 +5492,19 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 			copy_shadow_to_vmcs12(vmx);
 	}
+	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
+	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
 	/*
 	 * Copy over the full allocated size of vmcs12 rather than just the size
 	 * of the struct.
 	 */
-	if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
+	if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
 		return -EFAULT;
 	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
 	    vmcs12->vmcs_link_pointer != -1ull) {
-		if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
+		if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
 				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
 			return -EFAULT;
 	}
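For context, the get path above (and the set path below) is driven from userspace through the KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE vCPU ioctls. A hedged sketch of that usage on the save/restore side of a migration; the function name, the hardcoded payload size, and the placement of the capability probe are illustrative, not code from this series:

#include <err.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void migrate_nested_state(int vm_fd, int src_vcpu_fd, int dst_vcpu_fd)
{
	struct {
		struct kvm_nested_state state;
		__u8 payload[2 * 4096];		/* room for vmcs12 + shadow vmcs12 */
	} buf;

	/* KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) reports the maximum
	 * buffer size needed; 0 means nested state save/restore is absent. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE) <= 0)
		errx(1, "nested state not supported");

	memset(&buf, 0, sizeof(buf));
	buf.state.size = sizeof(buf);		/* tell KVM how much room we have */

	if (ioctl(src_vcpu_fd, KVM_GET_NESTED_STATE, &buf) < 0)
		err(1, "KVM_GET_NESTED_STATE");

	/* ... transfer 'buf' to the destination ... */

	if (ioctl(dst_vcpu_fd, KVM_SET_NESTED_STATE, &buf) < 0)
		err(1, "KVM_SET_NESTED_STATE");
}
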
@@ -5527,33 +5532,35 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12;
 	u32 exit_qual;
+	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
+		&user_kvm_nested_state->data.vmx[0];
 	int ret;
-	if (kvm_state->format != 0)
+	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
 		return -EINVAL;
-	if (!nested_vmx_allowed(vcpu))
-		return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
-	if (kvm_state->vmx.vmxon_pa == -1ull) {
-		if (kvm_state->vmx.smm.flags)
+	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
+		if (kvm_state->hdr.vmx.smm.flags)
 			return -EINVAL;
-		if (kvm_state->vmx.vmcs_pa != -1ull)
+		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
 			return -EINVAL;
-		vmx_leave_nested(vcpu);
-		return 0;
-	}
+		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
+			return -EINVAL;
+	} else {
+		if (!nested_vmx_allowed(vcpu))
+			return -EINVAL;
-	if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
-		return -EINVAL;
+		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
+			return -EINVAL;
+	}
-	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
 	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
 		return -EINVAL;
-	if (kvm_state->vmx.smm.flags &
+	if (kvm_state->hdr.vmx.smm.flags &
 	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
 		return -EINVAL;
@@ -5562,21 +5569,25 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
 	 * must be zero.
 	 */
-	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+	if (is_smm(vcpu) ? kvm_state->flags : kvm_state->hdr.vmx.smm.flags)
 		return -EINVAL;
-	if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
-	    !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
 		return -EINVAL;
 	vmx_leave_nested(vcpu);
-	if (kvm_state->vmx.vmxon_pa == -1ull)
+	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
+		if (!nested_vmx_allowed(vcpu))
+			return -EINVAL;
+		nested_enable_evmcs(vcpu, NULL);
+	}
+	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
 		return 0;
-	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
-		nested_enable_evmcs(vcpu, NULL);
-	vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
 		return ret;
@@ -5585,12 +5596,12 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
 		return 0;
-	if (kvm_state->vmx.vmcs_pa != -1ull) {
-		if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
-		    !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
+		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
+		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
 			return -EINVAL;
-		set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
 	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
 		/*
 		 * Sync eVMCS upon entry as we may not have
@@ -5601,16 +5612,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 	}
-	if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
 		vmx->nested.smm.vmxon = true;
 		vmx->nested.vmxon = false;
-		if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
 			vmx->nested.smm.guest_mode = true;
 	}
 	vmcs12 = get_vmcs12(vcpu);
-	if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
 		return -EFAULT;
 	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
@@ -5627,12 +5638,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	    vmcs12->vmcs_link_pointer != -1ull) {
 		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
-		if (kvm_state->size < sizeof(*kvm_state) + VMCS12_SIZE + sizeof(*vmcs12))
+		if (kvm_state->size <
+		    sizeof(*kvm_state) +
+		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
 			goto error_guest_mode;
 		if (copy_from_user(shadow_vmcs12,
-				   user_kvm_nested_state->data + VMCS12_SIZE,
-				   sizeof(*vmcs12))) {
+				   user_vmx_nested_state->shadow_vmcs12,
+				   sizeof(*shadow_vmcs12))) {
 			ret = -EFAULT;
 			goto error_guest_mode;
 		}
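
The size check in the final hunk assumes the shadow vmcs12 blob sits directly after the vmcs12 blob in the data area. Roughly, the buffer exchanged with userspace is laid out as follows; a sketch in which the 4096 figure mirrors VMCS12_SIZE and is illustrative:

/*
 * offset 0                                      : struct kvm_nested_state
 *                                                 (flags, format, size, hdr)
 * offset sizeof(struct kvm_nested_state)        : data.vmx[0].vmcs12        (4096 bytes)
 * offset sizeof(struct kvm_nested_state) + 4096 : data.vmx[0].shadow_vmcs12 (4096 bytes,
 *                                                 present only when a shadow VMCS is in
 *                                                 use and vmcs_link_pointer is valid)
 */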