KVM: PPC: Book3S: Add MMIO emulation for FP and VSX instructions

This patch provides MMIO load/store emulation for instructions operating
on the types 'double', 'vector unsigned char', 'vector signed char',
'vector unsigned short', 'vector signed short', 'vector unsigned int',
'vector signed int' and 'vector double'.

The instructions that this adds emulation for are:

- ldx, ldux, lwax,
- lfs, lfsx, lfsu, lfsux, lfd, lfdx, lfdu, lfdux,
- stfs, stfsx, stfsu, stfsux, stfd, stfdx, stfdu, stfdux, stfiwx,
- lxsdx, lxsspx, lxsiwax, lxsiwzx, lxvd2x, lxvw4x, lxvdsx,
- stxsdx, stxsspx, stxsiwx, stxvd2x, stxvw4x

[paulus@ozlabs.org - some cleanups, fixes and rework, make it
 compile for Book E, fix build when PR KVM is built in]

Signed-off-by: Bin Lu <lblulb@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
This commit is contained in:
Bin Lu
2017-02-21 21:12:36 +08:00
Committed by Paul Mackerras
Parent 307d927967
Commit 6f63e81bda
6 changed files with 731 additions and 7 deletions
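
From userspace's point of view nothing new is required: these accesses still surface as ordinary KVM_EXIT_MMIO exits, and KVM itself moves the data into or out of the FP/VSX register when the vcpu is re-entered. A minimal sketch of the VMM side, using the standard kvm_run MMIO fields (device_read()/device_write() are assumed VMM helpers, not part of the patch):

#include <linux/kvm.h>

/* Assumed VMM device-model helpers. */
extern void device_read(__u64 addr, __u8 *data, __u32 len);
extern void device_write(__u64 addr, const __u8 *data, __u32 len);

static void handle_mmio_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_MMIO)
		return;

	if (run->mmio.is_write)
		device_write(run->mmio.phys_addr, run->mmio.data,
			     run->mmio.len);
	else
		device_read(run->mmio.phys_addr, run->mmio.data,
			    run->mmio.len);

	/* On the next KVM_RUN, a read completes inside KVM, which
	 * writes run->mmio.data into the target FPR/VSR. */
}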

@@ -34,6 +34,30 @@
 #include "timing.h"
 #include "trace.h"

#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

/* XXX to do:
 * lhax
 * lhaux
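
A guest hits the paths added below only when it executes one of these instructions against an emulated (MMIO) address, and the checks above ensure that a guest whose MSR.FP/MSR.VSX is clear gets the facility-unavailable interrupt instead of silent emulation. For illustration, a hypothetical guest-side accessor that would exercise the new lfdx handling (mmio_read_double is not from the patch):

/*
 * Hypothetical guest code: read a double from a device BAR.
 * With MSR.FP set this now reaches KVM's MMIO emulation of lfdx;
 * with MSR.FP clear the guest takes an FP-unavailable interrupt.
 */
static inline double mmio_read_double(const volatile void *addr)
{
	double d;

	asm volatile("lfdx %0,0,%1" : "=f" (d) : "r" (addr) : "memory");
	return d;
}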
@@ -66,6 +90,19 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
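
The TX/SX bit referred to in the comment above is bit 0 of the instruction image: VSX instructions form a 6-bit VSR number by concatenating the 5-bit T/S field with TX/SX, which is how all 64 VSRs are addressed. The patch reads it with a get_tx_or_sx() accessor in the style of the other get_* helpers in arch/powerpc/include/asm/disassemble.h; a minimal sketch (exact form assumed):

static inline unsigned int get_tx_or_sx(u32 inst)
{
	return inst & 0x1;
}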
@@ -157,6 +194,230 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
				2, 0);
			break;

		case OP_31_XOP_LDX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;

		case OP_31_XOP_LDUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LWAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;

#ifdef CONFIG_PPC_FPU
		case OP_31_XOP_LFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
			break;

		case OP_31_XOP_LFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs),
				8, 1);
			break;

		case OP_31_XOP_STFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs),
				8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFIWX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs),
				4, 1);
			break;
#endif

#ifdef CONFIG_VSX
		case OP_31_XOP_LXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXSIWAX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 4, 1, 1);
			break;

		case OP_31_XOP_LXSIWZX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVD2X:
		/*
		 * In this case, the official load/store process is:
		 * Step 1: exit from the VM via the page-fault ISR, at which
		 * point KVM saves the VSRs; see
		 * guest_exit_cont->store_fp_state->SAVE_32VSRS for reference.
		 *
		 * Step 2: copy the data between memory and the vcpu.
		 * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X, we use
		 * 2 copies * 8 bytes or 4 copies * 4 bytes to simulate one
		 * copy of 16 bytes.
		 * There is also an endianness issue here: we have to mind the
		 * layout of memory; see the LXVD2X_ROT/STXVD2X_ROT macros for
		 * reference.  If the host is little-endian, KVM calls XXSWAPD
		 * for LXVD2X_ROT/STXVD2X_ROT, so the positions in memory have
		 * to be swapped.
		 *
		 * Step 3: return to the guest, where KVM restores the
		 * registers; see kvmppc_hv_entry->load_fp_state->REST_32VSRS
		 * for reference.
		 */
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVDSX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type =
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_STXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 8, 1);
			break;

		case OP_31_XOP_STXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 4, 1);
			break;

		case OP_31_XOP_STXSIWX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_offset = 1;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 4, 1);
			break;

		case OP_31_XOP_STXVD2X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 8, 1);
			break;

		case OP_31_XOP_STXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 4, 1);
			break;
#endif /* CONFIG_VSX */

		default:
			emulated = EMULATE_FAIL;
			break;
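
The VSX cases above only record how a 16-byte (or narrower) access is to be split (mmio_vsx_copy_nums chunks of mmio_vsx_copy_type) and then hand off to kvmppc_handle_vsx_load()/kvmppc_handle_vsx_store(), which issue one MMIO transaction per chunk. A simplified sketch of the load side, assuming a per-chunk helper here called __kvmppc_handle_load():

/*
 * Sketch only: one MMIO round trip per chunk, advancing the guest
 * physical address and the offset within the target VSR each time.
 */
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
				is_default_endian, mmio_sign_extend);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}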
@@ -167,7 +428,45 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
#ifdef CONFIG_PPC_FPU
	case OP_STFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
			VCPU_FPR(vcpu, rs),
			4, 1);
		break;

	case OP_STFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
			VCPU_FPR(vcpu, rs),
			4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
			VCPU_FPR(vcpu, rs),
			8, 1);
		break;

	case OP_STFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
			VCPU_FPR(vcpu, rs),
			8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

	/* TBD: Add support for other 64 bit load variants like ldu etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
@@ -252,6 +551,40 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

#ifdef CONFIG_PPC_FPU
	case OP_LFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
			KVM_MMIO_REG_FPR|rt, 4, 1);
		break;

	case OP_LFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
			KVM_MMIO_REG_FPR|rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
			KVM_MMIO_REG_FPR|rt, 8, 1);
		break;

	case OP_LFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
			KVM_MMIO_REG_FPR|rt, 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

	default:
		emulated = EMULATE_FAIL;
		break;
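
One detail worth spelling out: mmio_sp64_extend marks the 4-byte FP accesses (lfs/stfs and the single-precision VSX forms) whose in-memory image is IEEE single precision, while FPRs always hold the double-precision format, so the value must be converted when the MMIO access completes. The kernel performs this widening with FP load/store instructions; a user-space C illustration of the same transformation (sp_to_dp here is illustrative, not the kernel's code):

#include <stdint.h>
#include <string.h>

/* Widen a single-precision bit image to the double-precision
 * format kept in an FPR; every finite float converts exactly. */
static uint64_t sp_to_dp(uint32_t sp_bits)
{
	float f;
	double d;
	uint64_t dp_bits;

	memcpy(&f, &sp_bits, sizeof(f));
	d = (double)f;
	memcpy(&dp_bits, &d, sizeof(dp_bits));
	return dp_bits;
}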