Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "s390:
   - implement diag318

  x86:
   - Report last CPU for debugging
   - Emulate smaller MAXPHYADDR in the guest than in the host
   - .noinstr and tracing fixes from Thomas
   - nested SVM page table switching optimization and fixes

  Generic:
   - Unify shadow MMU cache data structures across architectures"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (127 commits)
  KVM: SVM: Fix sev_pin_memory() error handling
  KVM: LAPIC: Set the TDCR settable bits
  KVM: x86: Specify max TDP level via kvm_configure_mmu()
  KVM: x86/mmu: Rename max_page_level to max_huge_page_level
  KVM: x86: Dynamically calculate TDP level from max level and MAXPHYADDR
  KVM: VXM: Remove temporary WARN on expected vs. actual EPTP level mismatch
  KVM: x86: Pull the PGD's level from the MMU instead of recalculating it
  KVM: VMX: Make vmx_load_mmu_pgd() static
  KVM: x86/mmu: Add separate helper for shadow NPT root page role calc
  KVM: VMX: Drop a duplicate declaration of construct_eptp()
  KVM: nSVM: Correctly set the shadow NPT root level in its MMU role
  KVM: Using macros instead of magic values
  MIPS: KVM: Fix build error caused by 'kvm_run' cleanup
  KVM: nSVM: remove nonsensical EXITINFO1 adjustment on nested NPF
  KVM: x86: Add a capability for GUEST_MAXPHYADDR < HOST_MAXPHYADDR support
  KVM: VMX: optimize #PF injection when MAXPHYADDR does not match
  KVM: VMX: Add guest physical address check in EPT violation and misconfig
  KVM: VMX: introduce vmx_need_pf_intercept
  KVM: x86: update exception bitmap on CPUID changes
  KVM: x86: rename update_bp_intercept to update_exception_bitmap
  ...
@@ -1,31 +0,0 @@
-KVM/MIPS Trap & Emulate Release Notes
-=====================================
-
-(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
-    Malta Board with FPGA based 34K
-    Sigma Designs TangoX board with a 24K based 8654 SoC.
-    Malta Board with 74K @ 1GHz
-
-(2) Both Guest kernel and Guest Userspace execute in UM.
-    Guest User address space:   0x00000000 -> 0x40000000
-    Guest Kernel Unmapped:      0x40000000 -> 0x60000000
-    Guest Kernel Mapped:        0x60000000 -> 0x80000000
-
-    Guest Usermode virtual memory is limited to 1GB.
-
-(2) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
-    Note that due to cache aliasing issues, 4K page sizes are NOT supported.
-
-(3) No HugeTLB Support
-    Both the host kernel and Guest kernel should have the page size set to 16K.
-    This will be implemented in a future release.
-
-(4) KVM/MIPS does not have support for SMP Guests
-    Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
-    LL/TLBP/SC. Since the TLBP instruction causes a trap the reservation gets cleared
-    when we ERET back to the guest. This causes the guest to hang in an infinite loop.
-    This will be fixed in a future release.
-
-(5) Use Host FPU
-    Currently KVM/MIPS emulates a 24K CPU without a FPU.
-    This will be fixed in a future release
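For context on the layout the deleted notes describe: below is a minimal user-space sketch (illustrative only; the boundary constants come from the address table above, while the helper names are invented here) that classifies a guest virtual address by segment:

#include <stdio.h>

/* Boundaries from the release notes above (trap & emulate layout). */
#define GUEST_USER_BASE       0x00000000UL
#define GUEST_KSEG_UNMAPPED   0x40000000UL
#define GUEST_KSEG_MAPPED     0x60000000UL
#define GUEST_LIMIT           0x80000000UL

static const char *guest_segment(unsigned long va)
{
	if (va < GUEST_KSEG_UNMAPPED)
		return "Guest User";            /* 0x00000000 - 0x3fffffff */
	if (va < GUEST_KSEG_MAPPED)
		return "Guest Kernel Unmapped"; /* 0x40000000 - 0x5fffffff */
	if (va < GUEST_LIMIT)
		return "Guest Kernel Mapped";   /* 0x60000000 - 0x7fffffff */
	return "outside guest range";
}

int main(void)
{
	printf("%s\n", guest_segment(0x1000));     /* Guest User */
	printf("%s\n", guest_segment(0x42000000)); /* Guest Kernel Unmapped */
	return 0;
}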
@@ -37,10 +37,11 @@ choice
 
 config KVM_MIPS_TE
 	bool "Trap & Emulate"
 	depends on CPU_MIPS32_R2
 	help
 	  Use trap and emulate to virtualize 32-bit guests in user mode. This
 	  does not require any special hardware Virtualization support beyond
-	  standard MIPS32/64 r2 or later, but it does require the guest kernel
+	  standard MIPS32 r2 or later, but it does require the guest kernel
 	  to be configured with CONFIG_KVM_GUEST=y so that it resides in the
 	  user address segment.
 
@@ -1262,7 +1262,6 @@ unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
 
 enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 					   u32 *opc, u32 cause,
-					   struct kvm_run *run,
 					   struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -1597,12 +1596,12 @@ dont_update_pc:
 
 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
 					     u32 cause,
-					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu)
 {
 	int r;
 	enum emulation_result er;
 	u32 rt;
+	struct kvm_run *run = vcpu->run;
 	void *data = run->mmio.data;
 	unsigned int imme;
 	unsigned long curr_pc;
@@ -1863,7 +1862,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
 				  vcpu->arch.gprs[rt], *(u64 *)data);
 			break;
 		default:
-			kvm_err("Godson Exteneded GS-Store not yet supported (inst=0x%08x)\n",
+			kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
 				inst.word);
 			break;
 		}
@@ -1896,9 +1895,9 @@ out_fail:
 }
 
 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
-					    u32 cause, struct kvm_run *run,
-					    struct kvm_vcpu *vcpu)
+					    u32 cause, struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	int r;
 	enum emulation_result er;
 	unsigned long curr_pc;
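The bulk of the MIPS hunks below follow one mechanical pattern: the 'struct kvm_run *run' parameter is dropped from signatures, because each vCPU owns exactly one run structure and can always reach it as vcpu->run. A self-contained sketch of the pattern, using stand-in types rather than the kernel's definitions:

/* Hypothetical stand-ins for the kernel types, for illustration only. */
struct kvm_run { int exit_reason; };
struct kvm_vcpu { struct kvm_run *run; };

/* Before: every caller must thread the redundant pointer through. */
static int emulate_old(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	run->exit_reason = 0;
	return 0;
}

/* After: derive it locally; vcpu->run always names the same object. */
static int emulate_new(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	run->exit_reason = 0;
	return 0;
}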
@@ -2107,7 +2106,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 			vcpu->mmio_needed = 30;	/* signed */
 			break;
 		default:
-			kvm_err("Godson Exteneded GS-Load for float not yet supported (inst=0x%08x)\n",
+			kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
 				inst.word);
 			break;
 		}
@@ -2128,7 +2127,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 		run->mmio.phys_addr, run->mmio.len, run->mmio.data);
 
 	if (!r) {
-		kvm_mips_complete_mmio_load(vcpu, run);
+		kvm_mips_complete_mmio_load(vcpu);
 		vcpu->mmio_needed = 0;
 		return EMULATE_DONE;
 	}
@@ -2140,7 +2139,6 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
 						     unsigned long curr_pc,
 						     unsigned long addr,
-						     struct kvm_run *run,
 						     struct kvm_vcpu *vcpu,
 						     u32 cause)
 {
@@ -2168,13 +2166,13 @@ static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
 		/* no matching guest TLB */
 		vcpu->arch.host_cp0_badvaddr = addr;
 		vcpu->arch.pc = curr_pc;
-		kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
+		kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu);
 		return EMULATE_EXCEPT;
 	case KVM_MIPS_TLBINV:
 		/* invalid matching guest TLB */
 		vcpu->arch.host_cp0_badvaddr = addr;
 		vcpu->arch.pc = curr_pc;
-		kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
+		kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu);
 		return EMULATE_EXCEPT;
 	default:
 		break;
@@ -2184,7 +2182,6 @@ static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
 
 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 					     u32 *opc, u32 cause,
-					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
@@ -2274,7 +2271,7 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 		 * guest's behalf.
 		 */
 		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
-					     curr_pc, va, run, vcpu, cause);
+					     curr_pc, va, vcpu, cause);
 		if (er != EMULATE_DONE)
 			goto done;
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -2287,11 +2284,11 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 	} else if (op_inst == Hit_Invalidate_I) {
 		/* Perform the icache synchronisation on the guest's behalf */
 		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
-					     curr_pc, va, run, vcpu, cause);
+					     curr_pc, va, vcpu, cause);
 		if (er != EMULATE_DONE)
 			goto done;
 		er = kvm_mips_guest_cache_op(protected_flush_icache_line,
-					     curr_pc, va, run, vcpu, cause);
+					     curr_pc, va, vcpu, cause);
 		if (er != EMULATE_DONE)
 			goto done;
 
@@ -2317,7 +2314,6 @@ done:
 }
 
 enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
-					    struct kvm_run *run,
 					    struct kvm_vcpu *vcpu)
 {
 	union mips_instruction inst;
@@ -2333,14 +2329,14 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
 
 	switch (inst.r_format.opcode) {
 	case cop0_op:
-		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+		er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu);
 		break;
 
 #ifndef CONFIG_CPU_MIPSR6
 	case cache_op:
 		++vcpu->stat.cache_exits;
 		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+		er = kvm_mips_emulate_cache(inst, opc, cause, vcpu);
 		break;
 #else
 	case spec3_op:
@@ -2348,7 +2344,7 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
 		case cache6_op:
 			++vcpu->stat.cache_exits;
 			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-			er = kvm_mips_emulate_cache(inst, opc, cause, run,
+			er = kvm_mips_emulate_cache(inst, opc, cause,
 						    vcpu);
 			break;
 		default:
@@ -2388,7 +2384,6 @@ long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
 
 enum emulation_result kvm_mips_emulate_syscall(u32 cause,
 					       u32 *opc,
-					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2423,7 +2418,6 @@ enum emulation_result kvm_mips_emulate_syscall(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
 						  u32 *opc,
-						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2467,7 +2461,6 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
 						 u32 *opc,
-						 struct kvm_run *run,
 						 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2509,7 +2502,6 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
 						  u32 *opc,
-						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2551,7 +2543,6 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
 						 u32 *opc,
-						 struct kvm_run *run,
 						 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2592,7 +2583,6 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
 					      u32 *opc,
-					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2632,7 +2622,6 @@ enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
 
 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
 					       u32 *opc,
-					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2661,7 +2650,6 @@ enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
 					      u32 *opc,
-					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2696,7 +2684,6 @@ enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
 					      u32 *opc,
-					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2731,7 +2718,6 @@ enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
 						u32 *opc,
-						struct kvm_run *run,
 						struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2766,7 +2752,6 @@ enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
 						  u32 *opc,
-						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2801,7 +2786,6 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
 					       u32 *opc,
-					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2836,7 +2820,6 @@ enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
 						  u32 *opc,
-						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2870,7 +2853,6 @@ enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
 }
 
 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
-					 struct kvm_run *run,
 					 struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2959,12 +2941,12 @@ emulate_ri:
 	 * branch target), and pass the RI exception to the guest OS.
 	 */
 	vcpu->arch.pc = curr_pc;
-	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+	return kvm_mips_emulate_ri_exc(cause, opc, vcpu);
 }
 
-enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
-						  struct kvm_run *run)
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
 	enum emulation_result er = EMULATE_DONE;
 
@@ -3107,7 +3089,6 @@ done:
 
 static enum emulation_result kvm_mips_emulate_exc(u32 cause,
 						  u32 *opc,
-						  struct kvm_run *run,
 						  struct kvm_vcpu *vcpu)
 {
 	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -3145,7 +3126,6 @@ static enum emulation_result kvm_mips_emulate_exc(u32 cause,
 
 enum emulation_result kvm_mips_check_privilege(u32 cause,
 					       u32 *opc,
-					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
@@ -3227,7 +3207,7 @@ enum emulation_result kvm_mips_check_privilege(u32 cause,
 	}
 
 	if (er == EMULATE_PRIV_FAIL)
-		kvm_mips_emulate_exc(cause, opc, run, vcpu);
+		kvm_mips_emulate_exc(cause, opc, vcpu);
 
 	return er;
 }
@@ -3241,7 +3221,6 @@ enum emulation_result kvm_mips_check_privilege(u32 cause,
  */
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
					      u32 *opc,
-					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu,
					      bool write_fault)
 {
@@ -3265,9 +3244,9 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 					KVM_ENTRYHI_ASID));
 	if (index < 0) {
 		if (exccode == EXCCODE_TLBL) {
-			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu);
 		} else if (exccode == EXCCODE_TLBS) {
-			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+			er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu);
 		} else {
 			kvm_err("%s: invalid exc code: %d\n", __func__,
 				exccode);
@@ -3282,10 +3261,10 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 		 */
 		if (!TLB_IS_VALID(*tlb, va)) {
 			if (exccode == EXCCODE_TLBL) {
-				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+				er = kvm_mips_emulate_tlbinv_ld(cause, opc,
 								vcpu);
 			} else if (exccode == EXCCODE_TLBS) {
-				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+				er = kvm_mips_emulate_tlbinv_st(cause, opc,
 								vcpu);
 			} else {
 				kvm_err("%s: invalid exc code: %d\n", __func__,
@@ -450,7 +450,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	int r = -EINTR;
 
 	vcpu_load(vcpu);
@@ -459,11 +458,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 	if (vcpu->mmio_needed) {
 		if (!vcpu->mmio_is_write)
-			kvm_mips_complete_mmio_load(vcpu, run);
+			kvm_mips_complete_mmio_load(vcpu);
 		vcpu->mmio_needed = 0;
 	}
 
-	if (run->immediate_exit)
+	if (vcpu->run->immediate_exit)
 		goto out;
 
 	lose_fpu(1);
@@ -480,7 +479,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	 */
 	smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-	r = kvm_mips_callbacks->vcpu_run(run, vcpu);
+	r = kvm_mips_callbacks->vcpu_run(vcpu);
 
 	trace_kvm_out(vcpu);
 	guest_exit_irqoff();
@@ -1236,7 +1235,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	 * end up causing an exception to be delivered to the Guest
 	 * Kernel
 	 */
-	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+	er = kvm_mips_check_privilege(cause, opc, vcpu);
 	if (er == EMULATE_PRIV_FAIL) {
 		goto skip_emul;
 	} else if (er == EMULATE_FAIL) {
@@ -1385,7 +1384,7 @@ skip_emul:
 	 */
 	smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-	kvm_mips_callbacks->vcpu_reenter(run, vcpu);
+	kvm_mips_callbacks->vcpu_reenter(vcpu);
 
 	/*
 	 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
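These hunks are where the MMIO round trip comes together: the kernel fills run->mmio, sets KVM_EXIT_MMIO and returns to userspace, which services the access and calls KVM_RUN again; on re-entry the pending read data is folded back into the guest register via kvm_mips_complete_mmio_load(). A simplified userspace loop showing that contract (illustrative; error handling and vCPU setup are omitted, and run_loop is a made-up helper, not a kernel or libkvm API):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* vcpu_fd is an open KVM vCPU fd; run is its mmap'ed struct kvm_run. */
static int run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			/*
			 * Service the access; for a read, the bytes written
			 * here are what the kernel copies into the guest
			 * register when KVM_RUN is entered again.
			 */
			if (!run->mmio.is_write)
				memset(run->mmio.data, 0, run->mmio.len);
			break;
		case KVM_EXIT_INTERNAL_ERROR:
			return -1;
		default:
			break;
		}
	}
}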
@@ -25,41 +25,9 @@
 #define KVM_MMU_CACHE_MIN_PAGES 2
 #endif
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  int min, int max)
-{
-	void *page;
-
-	BUG_ON(max > KVM_NR_MEM_OBJS);
-	if (cache->nobjs >= min)
-		return 0;
-	while (cache->nobjs < max) {
-		page = (void *)__get_free_page(GFP_KERNEL);
-		if (!page)
-			return -ENOMEM;
-		cache->objects[cache->nobjs++] = page;
-	}
-	return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-	while (mc->nobjs)
-		free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-	void *p;
-
-	BUG_ON(!mc || !mc->nobjs);
-	p = mc->objects[--mc->nobjs];
-	return p;
-}
-
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 /**
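This is the "unify shadow MMU cache data structures" item from the pull message: the arch-local helpers above are deleted in favor of the common kvm_mmu_topup_memory_cache()/kvm_mmu_memory_cache_alloc(). The mechanism either way is the same: preallocate a small stack of pages where sleeping and failure are allowed, then pop them in fault paths that must not fail. A user-space sketch of that idea (not the kernel API; the names and capacity here are invented for illustration):

#include <stdlib.h>

#define CACHE_CAPACITY 4 /* stand-in for the fixed-size objects[] array */

struct memory_cache {
	int nobjs;
	void *objects[CACHE_CAPACITY];
};

/* Fill the cache up front, where allocation may sleep or fail. */
static int cache_topup(struct memory_cache *mc, int min)
{
	while (mc->nobjs < min) {
		void *page = calloc(1, 4096);

		if (!page)
			return -1;
		mc->objects[mc->nobjs++] = page;
	}
	return 0;
}

/* Pop a preallocated page in a context that must not fail or sleep. */
static void *cache_alloc(struct memory_cache *mc)
{
	return mc->nobjs ? mc->objects[--mc->nobjs] : NULL;
}

static void cache_free(struct memory_cache *mc)
{
	while (mc->nobjs)
		free(mc->objects[--mc->nobjs]);
}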
@@ -153,7 +121,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
 
 		if (!cache)
 			return NULL;
-		new_pmd = mmu_memory_cache_alloc(cache);
+		new_pmd = kvm_mmu_memory_cache_alloc(cache);
 		pmd_init((unsigned long)new_pmd,
 			 (unsigned long)invalid_pte_table);
 		pud_populate(NULL, pud, new_pmd);
@@ -164,7 +132,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
 
 		if (!cache)
 			return NULL;
-		new_pte = mmu_memory_cache_alloc(cache);
+		new_pte = kvm_mmu_memory_cache_alloc(cache);
 		clear_page(new_pte);
 		pmd_populate_kernel(NULL, pmd, new_pte);
 	}
@@ -711,8 +679,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 		goto out;
 
 	/* We need a minimum of cached pages ready for page table creation */
-	err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
-				     KVM_NR_MEM_OBJS);
+	err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
 	if (err)
 		goto out;
 
@@ -796,8 +763,7 @@ static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
 	int ret;
 
 	/* We need a minimum of cached pages ready for page table creation */
-	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
-				     KVM_NR_MEM_OBJS);
+	ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
 	if (ret)
 		return NULL;
 
@@ -67,7 +67,6 @@ static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
@@ -81,14 +80,14 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 			 * Unusable/no FPU in guest:
 			 * deliver guest COP1 Unusable Exception
 			 */
-			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+			er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
 		} else {
 			/* Restore FPU state */
 			kvm_own_fpu(vcpu);
 			er = EMULATE_DONE;
 		}
 	} else {
-		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_inst(cause, opc, vcpu);
 	}
 
 	switch (er) {
@@ -97,12 +96,12 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 		break;
 
 	case EMULATE_FAIL:
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		break;
 
 	case EMULATE_WAIT:
-		run->exit_reason = KVM_EXIT_INTR;
+		vcpu->run->exit_reason = KVM_EXIT_INTR;
 		ret = RESUME_HOST;
 		break;
 
@@ -116,8 +115,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
-			     struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	union mips_instruction inst;
@@ -125,7 +123,7 @@ static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
 
 	/* A code fetch fault doesn't count as an MMIO */
 	if (kvm_is_ifetch_fault(&vcpu->arch)) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
@@ -134,23 +132,22 @@ static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
 		opc += 1;
 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
 	if (err) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
 	/* Emulate the load */
-	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_load(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Emulate load from MMIO space failed\n");
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	} else {
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 	}
 	return RESUME_HOST;
 }
 
-static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
-			      struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	union mips_instruction inst;
@@ -161,34 +158,33 @@ static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
 		opc += 1;
 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
 	if (err) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
 	/* Emulate the store */
-	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_store(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Emulate store to MMIO space failed\n");
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	} else {
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 	}
 	return RESUME_HOST;
 }
 
-static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
+static int kvm_mips_bad_access(u32 cause, u32 *opc,
 			       struct kvm_vcpu *vcpu, bool store)
 {
 	if (store)
-		return kvm_mips_bad_store(cause, opc, run, vcpu);
+		return kvm_mips_bad_store(cause, opc, vcpu);
 	else
-		return kvm_mips_bad_load(cause, opc, run, vcpu);
+		return kvm_mips_bad_load(cause, opc, vcpu);
 }
 
 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
@@ -212,12 +208,12 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 	 * They would indicate stale host TLB entries.
 	 */
 	if (unlikely(index < 0)) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 	tlb = vcpu->arch.guest_tlb + index;
 	if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
@@ -226,23 +222,23 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 		 * exception. Relay that on to the guest so it can handle it.
 		 */
 		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
-			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+			kvm_mips_emulate_tlbmod(cause, opc, vcpu);
 			return RESUME_GUEST;
 		}
 
 		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
 							 true))
 			/* Not writable, needs handling as MMIO */
-			return kvm_mips_bad_store(cause, opc, run, vcpu);
+			return kvm_mips_bad_store(cause, opc, vcpu);
 		return RESUME_GUEST;
 	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
 		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
 			/* Not writable, needs handling as MMIO */
-			return kvm_mips_bad_store(cause, opc, run, vcpu);
+			return kvm_mips_bad_store(cause, opc, vcpu);
 		return RESUME_GUEST;
 	} else {
 		/* host kernel addresses are all handled as MMIO */
-		return kvm_mips_bad_store(cause, opc, run, vcpu);
+		return kvm_mips_bad_store(cause, opc, vcpu);
 	}
 }
@@ -276,7 +272,7 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
 		 * into the shadow host TLB
 		 */
 
-		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
+		er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
 		if (er == EMULATE_DONE)
 			ret = RESUME_GUEST;
 		else {
@@ -289,14 +285,14 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
 		 * not expect to ever get them
 		 */
 		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
-			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+			ret = kvm_mips_bad_access(cause, opc, vcpu, store);
 	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
 		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
 		/*
 		 * With EVA we may get a TLB exception instead of an address
 		 * error when the guest performs MMIO to KSeg1 addresses.
 		 */
-		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+		ret = kvm_mips_bad_access(cause, opc, vcpu, store);
 	} else {
 		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
 			store ? "ST" : "LD", cause, opc, badvaddr);
@@ -320,7 +316,6 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
@@ -328,11 +323,11 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 
 	if (KVM_GUEST_KERNEL_MODE(vcpu)
 	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
+		ret = kvm_mips_bad_store(cause, opc, vcpu);
 	} else {
 		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
 			cause, opc, badvaddr);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -340,18 +335,17 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	int ret = RESUME_GUEST;
 
 	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
+		ret = kvm_mips_bad_load(cause, opc, vcpu);
 	} else {
 		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
 			cause, opc, badvaddr);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -359,17 +353,16 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_syscall(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -377,17 +370,16 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+	er = kvm_mips_handle_ri(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -395,17 +387,16 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -413,17 +404,16 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -431,17 +421,16 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -449,17 +438,16 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -474,7 +462,6 @@ static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
 static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
@@ -486,10 +473,10 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
 		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
 		 * guest reserved instruction exception
 		 */
-		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
 	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
 		/* MSA disabled by guest, guest MSA disabled exception */
-		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
 	} else {
 		/* Restore MSA/FPU state */
 		kvm_own_msa(vcpu);
@@ -502,7 +489,7 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
 		break;
 
 	case EMULATE_FAIL:
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		break;
 
@@ -1184,8 +1171,7 @@ void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 }
 
-static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
-				       struct kvm_vcpu *vcpu)
+static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
@@ -1228,7 +1214,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
 	check_mmu_context(mm);
 }
 
-static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int cpu = smp_processor_id();
 	int r;
@@ -1237,7 +1223,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	kvm_mips_deliver_interrupts(vcpu,
 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
 
-	kvm_trap_emul_vcpu_reenter(run, vcpu);
+	kvm_trap_emul_vcpu_reenter(vcpu);
 
 	/*
 	 * We use user accessors to access guest memory, but we don't want to
@@ -1255,7 +1241,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	 */
 	kvm_mips_suspend_mm(cpu);
 
-	r = vcpu->arch.vcpu_run(run, vcpu);
+	r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
 
 	/* We may have migrated while handling guest exits */
 	cpu = smp_processor_id();
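kvm_mips_callbacks, seen above and in the VZ hunks that follow, is a table of function pointers that lets Trap & Emulate and VZ share the generic run loop; this series narrows those hooks to take only the vCPU. A stripped-down sketch of the shape (illustrative only; mips_vm_ops and the te_* stubs are invented names, not the kernel's):

/* Forward declaration stands in for the real vCPU type. */
struct kvm_vcpu;

struct mips_vm_ops {
	int  (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};

static int te_vcpu_run(struct kvm_vcpu *vcpu)      { (void)vcpu; return 0; }
static void te_vcpu_reenter(struct kvm_vcpu *vcpu) { (void)vcpu; }

/* Trap & Emulate and VZ each install their own table at init time. */
static const struct mips_vm_ops trap_emul_ops = {
	.vcpu_run     = te_vcpu_run,
	.vcpu_reenter = te_vcpu_reenter,
};

static int dispatch_run(const struct mips_vm_ops *ops, struct kvm_vcpu *vcpu)
{
	ops->vcpu_reenter(vcpu); /* per-implementation re-entry fixups */
	return ops->vcpu_run(vcpu);
}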
@@ -874,7 +874,6 @@ static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
 
 static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
 					      u32 *opc, u32 cause,
-					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -1074,7 +1073,6 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
 
 static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
 					       u32 *opc, u32 cause,
-					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
@@ -1217,7 +1215,6 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
 {
 	enum emulation_result er = EMULATE_DONE;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	struct kvm_run *run = vcpu->run;
 	union mips_instruction inst;
 	int rd, rt, sel;
 	int err;
@@ -1233,12 +1230,12 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
 
 	switch (inst.r_format.opcode) {
 	case cop0_op:
-		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
+		er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
 		break;
 #ifndef CONFIG_CPU_MIPSR6
 	case cache_op:
 		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+		er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
 		break;
 #endif
 #ifdef CONFIG_CPU_LOONGSON64
@@ -1251,7 +1248,7 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
 #ifdef CONFIG_CPU_MIPSR6
 	case cache6_op:
 		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+		er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
 		break;
 #endif
 	case rdhwr_op:
@@ -1553,7 +1550,6 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
  */
 static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_FAIL;
 	int ret = RESUME_GUEST;
@@ -1581,7 +1577,7 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
 		break;
 
 	case EMULATE_FAIL:
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		break;
 
@@ -1600,8 +1596,6 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
  */
 static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
-
 	/*
 	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
 	 * should have been treated as a reserved instruction!
@@ -1612,7 +1606,7 @@ static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
 	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
 	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
 	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
@@ -1648,7 +1642,7 @@ static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 	}
 
 	/* Treat as MMIO */
-	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_load(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
 			opc, badvaddr);
@@ -1695,7 +1689,7 @@ static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
 	}
 
 	/* Treat as MMIO */
-	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_store(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
 			opc, badvaddr);
@@ -3242,7 +3236,7 @@ static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
 	kvm_vz_flush_shadow_all(kvm);
 }
 
-static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
 	int cpu = smp_processor_id();
 	int preserve_guest_tlb;
@@ -3258,7 +3252,7 @@ static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		kvm_vz_vcpu_load_wired(vcpu);
 }
 
-static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int cpu = smp_processor_id();
 	int r;
@@ -3271,7 +3265,7 @@ static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	kvm_vz_vcpu_load_tlb(vcpu, cpu);
 	kvm_vz_vcpu_load_wired(vcpu);
 
-	r = vcpu->arch.vcpu_run(run, vcpu);
+	r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
 
 	kvm_vz_vcpu_save_wired(vcpu);
 