KVM: x86: Refactor up kvm_{g,s}et_msr() to simplify callers
Refactor the top-level MSR accessors to take/return the index and value
directly instead of requiring the caller to dump them into a msr_data
struct.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
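As a rough sketch of what the refactor means for callers: the prototypes below are inferred from the new call sites in the diff, the header change itself is not part of this excerpt, and the helper names are made up for illustration.

/*
 * Illustration only: hypothetical callers using the new calling
 * convention. kvm_set_msr()/kvm_get_msr() and struct msr_data are the
 * real names; everything else here is an assumed example.
 *
 * Old convention (removed by this patch), for comparison:
 *
 *        struct msr_data msr;
 *
 *        msr.host_initiated = false;
 *        msr.index = index;
 *        msr.data = value;
 *        if (kvm_set_msr(vcpu, &msr))
 *                return -EINVAL;
 */
#include <linux/kvm_host.h>     /* struct kvm_vcpu; pulls in the x86 MSR accessors */

static int emulate_guest_wrmsr(struct kvm_vcpu *vcpu, u32 index, u64 value)
{
        /* New convention: pass the index and value directly. */
        return kvm_set_msr(vcpu, index, value);
}

static int emulate_guest_rdmsr(struct kvm_vcpu *vcpu, u32 index, u64 *value)
{
        /* New convention: the value comes back through an out parameter. */
        return kvm_get_msr(vcpu, index, value);
}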

committed by Paolo Bonzini

parent b274a29081
commit f20935d85a
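For reference while reading the removed lines below, struct msr_data in arch/x86 KVM is essentially the following at the time of this commit; reproduced as a reading aid, not part of the patch:

/* Reading aid, not part of the patch: the struct callers used to fill. */
struct msr_data {
        bool host_initiated;    /* true for host-side (ioctl) accesses */
        u32 index;              /* MSR number */
        u64 data;               /* value to write, or value read back */
};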
@@ -864,9 +864,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 {
         u32 i;
         struct vmx_msr_entry e;
-        struct msr_data msr;
 
-        msr.host_initiated = false;
         for (i = 0; i < count; i++) {
                 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
                                         &e, sizeof(e))) {
@@ -881,9 +879,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
                                 __func__, i, e.index, e.reserved);
                         goto fail;
                 }
-                msr.index = e.index;
-                msr.data = e.value;
-                if (kvm_set_msr(vcpu, &msr)) {
+                if (kvm_set_msr(vcpu, e.index, e.value)) {
                         pr_debug_ratelimited(
                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
                                 __func__, i, e.index, e.value);
@@ -897,11 +893,11 @@ fail:
 
 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 {
+        u64 data;
         u32 i;
         struct vmx_msr_entry e;
 
         for (i = 0; i < count; i++) {
-                struct msr_data msr_info;
                 if (kvm_vcpu_read_guest(vcpu,
                                         gpa + i * sizeof(e),
                                         &e, 2 * sizeof(u32))) {
@@ -916,9 +912,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
                                 __func__, i, e.index, e.reserved);
                         return -EINVAL;
                 }
-                msr_info.host_initiated = false;
-                msr_info.index = e.index;
-                if (kvm_get_msr(vcpu, &msr_info)) {
+                if (kvm_get_msr(vcpu, e.index, &data)) {
                         pr_debug_ratelimited(
                                 "%s cannot read MSR (%u, 0x%x)\n",
                                 __func__, i, e.index);
@@ -927,10 +921,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
                 if (kvm_vcpu_write_guest(vcpu,
                                          gpa + i * sizeof(e) +
                                              offsetof(struct vmx_msr_entry, value),
-                                         &msr_info.data, sizeof(msr_info.data))) {
+                                         &data, sizeof(data))) {
                         pr_debug_ratelimited(
                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
-                                __func__, i, e.index, msr_info.data);
+                                __func__, i, e.index, data);
                         return -EINVAL;
                 }
         }
@@ -3889,7 +3883,6 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         struct vmx_msr_entry g, h;
-        struct msr_data msr;
         gpa_t gpa;
         u32 i, j;
 
@@ -3949,7 +3942,6 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
          * from the guest value. The intent is to stuff host state as
          * silently as possible, not to fully process the exit load list.
          */
-        msr.host_initiated = false;
         for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
                 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
                 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
@@ -3979,9 +3971,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
                         goto vmabort;
                 }
 
-                msr.index = h.index;
-                msr.data = h.value;
-                if (kvm_set_msr(vcpu, &msr)) {
+                if (kvm_set_msr(vcpu, h.index, h.value)) {
                         pr_debug_ratelimited(
                                 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
                                 __func__, j, h.index, h.value);