KVM: PPC: Clean up redundant 'kvm_run' parameters
In the current kvm version, 'kvm_run' is already embedded in the 'kvm_vcpu' structure. For historical reasons, many kvm-related functions still take both 'kvm_run' and 'kvm_vcpu' as parameters. This patch does a unified cleanup of these remaining redundant parameters.

Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
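Editor's note: the transformation is mechanical and the same everywhere. A minimal, self-contained C sketch of the pattern (stand-in struct definitions and a stand-in exit-reason value, not the kernel's; the real kvm_run/kvm_vcpu carry far more state):

	#include <stdio.h>

	struct kvm_run  { int exit_reason; };
	struct kvm_vcpu { struct kvm_run *run; };

	/* Old shape: both pointers travel through every call, redundantly. */
	static void complete_mmio_old(struct kvm_vcpu *vcpu, struct kvm_run *run)
	{
		run->exit_reason = 6;		/* stand-in for KVM_EXIT_MMIO */
	}

	/* New shape: 'run' is recovered locally from the vcpu that owns it. */
	static void complete_mmio_new(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *run = vcpu->run;

		run->exit_reason = 6;		/* stand-in for KVM_EXIT_MMIO */
	}

	int main(void)
	{
		struct kvm_run run = { 0 };
		struct kvm_vcpu vcpu = { .run = &run };

		complete_mmio_old(&vcpu, &run);	/* caller must keep two handles in sync */
		complete_mmio_new(&vcpu);	/* caller passes one handle */
		printf("exit_reason = %d\n", run.exit_reason);
		return 0;
	}

The diff below applies exactly this transformation to the PPC MMIO load/store helpers and their callers: drop the 'kvm_run' parameter, and where the body still needs it, fetch it once via 'vcpu->run'.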
commit 8c99d34578
parent 2610a57f64
committed by Paul Mackerras
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -279,7 +279,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
 
-int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	int r;
@@ -295,7 +295,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		r = RESUME_GUEST;
 		break;
 	case EMULATE_DO_MMIO:
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 		/* We must reload nonvolatiles because "update" load/store
 		 * instructions modify register state. */
 		/* Future optimization: only reload non-volatiles if they were
@@ -1107,9 +1107,9 @@ static inline u32 dp_to_sp(u64 fprd)
 #define dp_to_sp(x)	(x)
 #endif /* CONFIG_PPC_FPU */
 
-static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
-				      struct kvm_run *run)
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	u64 uninitialized_var(gpr);
 
 	if (run->mmio.len > sizeof(gpr)) {
@@ -1219,10 +1219,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
 	}
 }
 
-static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
 				unsigned int rt, unsigned int bytes,
 				int is_default_endian, int sign_extend)
 {
+	struct kvm_run *run = vcpu->run;
 	int idx, ret;
 	bool host_swabbed;
 
@@ -1256,7 +1257,7 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 	if (!ret) {
-		kvmppc_complete_mmio_load(vcpu, run);
+		kvmppc_complete_mmio_load(vcpu);
 		vcpu->mmio_needed = 0;
 		return EMULATE_DONE;
 	}
@@ -1264,24 +1265,24 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return EMULATE_DO_MMIO;
 }
 
-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_load(struct kvm_vcpu *vcpu,
 		       unsigned int rt, unsigned int bytes,
 		       int is_default_endian)
 {
-	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
 }
 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
 
 /* Same as above, but sign extends */
-int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
 			unsigned int rt, unsigned int bytes,
 			int is_default_endian)
 {
-	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
+	return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
 }
 
 #ifdef CONFIG_VSX
-int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
 			   unsigned int rt, unsigned int bytes,
 			   int is_default_endian, int mmio_sign_extend)
 {
@@ -1292,13 +1293,13 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		return EMULATE_FAIL;
 
 	while (vcpu->arch.mmio_vsx_copy_nums) {
-		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
 			is_default_endian, mmio_sign_extend);
 
 		if (emulated != EMULATE_DONE)
 			break;
 
-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 
 		vcpu->arch.mmio_vsx_copy_nums--;
 		vcpu->arch.mmio_vsx_offset++;
@@ -1307,9 +1308,10 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 }
 #endif /* CONFIG_VSX */
 
-int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_store(struct kvm_vcpu *vcpu,
 			u64 val, unsigned int bytes, int is_default_endian)
 {
+	struct kvm_run *run = vcpu->run;
 	void *data = run->mmio.data;
 	int idx, ret;
 	bool host_swabbed;
@@ -1423,7 +1425,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 	return result;
 }
 
-int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
 			int rs, unsigned int bytes, int is_default_endian)
 {
 	u64 val;
@@ -1439,13 +1441,13 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
 			return EMULATE_FAIL;
 
-		emulated = kvmppc_handle_store(run, vcpu,
+		emulated = kvmppc_handle_store(vcpu,
 			val, bytes, is_default_endian);
 
 		if (emulated != EMULATE_DONE)
 			break;
 
-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 
 		vcpu->arch.mmio_vsx_copy_nums--;
 		vcpu->arch.mmio_vsx_offset++;
@@ -1454,19 +1456,19 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
-static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
-			struct kvm_run *run)
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	enum emulation_result emulated = EMULATE_FAIL;
 	int r;
 
 	vcpu->arch.paddr_accessed += run->mmio.len;
 
 	if (!vcpu->mmio_is_write) {
-		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+		emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
 			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
 	} else {
-		emulated = kvmppc_handle_vsx_store(run, vcpu,
+		emulated = kvmppc_handle_vsx_store(vcpu,
 			 vcpu->arch.io_gpr, run->mmio.len, 1);
 	}
 
@@ -1490,7 +1492,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_ALTIVEC
-int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
 		unsigned int rt, unsigned int bytes, int is_default_endian)
 {
 	enum emulation_result emulated = EMULATE_DONE;
@@ -1499,13 +1501,13 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		return EMULATE_FAIL;
 
 	while (vcpu->arch.mmio_vmx_copy_nums) {
-		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+		emulated = __kvmppc_handle_load(vcpu, rt, bytes,
 						is_default_endian, 0);
 
 		if (emulated != EMULATE_DONE)
 			break;
 
-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 		vcpu->arch.mmio_vmx_copy_nums--;
 		vcpu->arch.mmio_vmx_offset++;
 	}
@@ -1585,7 +1587,7 @@ int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
 	return result;
 }
 
-int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
 		unsigned int rs, unsigned int bytes, int is_default_endian)
 {
 	u64 val = 0;
@@ -1620,12 +1622,12 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			return EMULATE_FAIL;
 		}
 
-		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
+		emulated = kvmppc_handle_store(vcpu, val, bytes,
 					       is_default_endian);
 		if (emulated != EMULATE_DONE)
 			break;
 
-		vcpu->arch.paddr_accessed += run->mmio.len;
+		vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
 		vcpu->arch.mmio_vmx_copy_nums--;
 		vcpu->arch.mmio_vmx_offset++;
 	}
@@ -1633,19 +1635,19 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return emulated;
 }
 
-static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
-		struct kvm_run *run)
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	enum emulation_result emulated = EMULATE_FAIL;
 	int r;
 
 	vcpu->arch.paddr_accessed += run->mmio.len;
 
 	if (!vcpu->mmio_is_write) {
-		emulated = kvmppc_handle_vmx_load(run, vcpu,
+		emulated = kvmppc_handle_vmx_load(vcpu,
 				vcpu->arch.io_gpr, run->mmio.len, 1);
 	} else {
-		emulated = kvmppc_handle_vmx_store(run, vcpu,
+		emulated = kvmppc_handle_vmx_store(vcpu,
 				vcpu->arch.io_gpr, run->mmio.len, 1);
 	}
 
@@ -1775,7 +1777,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	if (vcpu->mmio_needed) {
 		vcpu->mmio_needed = 0;
 		if (!vcpu->mmio_is_write)
-			kvmppc_complete_mmio_load(vcpu, run);
+			kvmppc_complete_mmio_load(vcpu);
 #ifdef CONFIG_VSX
 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
 			vcpu->arch.mmio_vsx_copy_nums--;
@@ -1783,7 +1785,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		}
 
 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
-			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
 			if (r == RESUME_HOST) {
 				vcpu->mmio_needed = 1;
 				goto out;
@@ -1797,7 +1799,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		}
 
 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
-			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
 			if (r == RESUME_HOST) {
 				vcpu->mmio_needed = 1;
 				goto out;
@@ -1830,7 +1832,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	if (run->immediate_exit)
 		r = -EINTR;
 	else
-		r = kvmppc_vcpu_run(run, vcpu);
+		r = kvmppc_vcpu_run(vcpu);
 
 	kvm_sigset_deactivate(vcpu);