Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "One NULL pointer dereference, and two fixes for regressions introduced
  during the merge window. The rest are fixes for MIPS, s390 and nested
  VMX"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: x86: Check memopp before dereference (CVE-2016-8630)
  kvm: nVMX: VMCLEAR an active shadow VMCS after last use
  KVM: x86: drop TSC offsetting kvm_x86_ops to fix KVM_GET/SET_CLOCK
  KVM: x86: fix wbinvd_dirty_mask use-after-free
  kvm/x86: Show WRMSR data is in hex
  kvm: nVMX: Fix kernel panics induced by illegal INVEPT/INVVPID types
  KVM: document lock orders
  KVM: fix OOPS on flush_work
  KVM: s390: Fix STHYI buffer alignment for diag224
  KVM: MIPS: Precalculate MMIO load resume PC
  KVM: MIPS: Make ERET handle ERL before EXL
  KVM: MIPS: Fix lazy user ASID regenerate for SMP
This commit is contained in:
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
|
||||
/* Host KSEG0 address of the EI/DI offset */
|
||||
void *kseg0_commpage;
|
||||
|
||||
u32 io_gpr; /* GPR used as IO source/target */
|
||||
/* Resume PC after MMIO completion */
|
||||
unsigned long io_pc;
|
||||
/* GPR used as IO source/target */
|
||||
u32 io_gpr;
|
||||
|
||||
struct hrtimer comparecount_timer;
|
||||
/* Count timer control KVM register */
|
||||
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
|
||||
/* Bitmask of pending exceptions to be cleared */
|
||||
unsigned long pending_exceptions_clr;
|
||||
|
||||
u32 pending_load_cause;
|
||||
|
||||
/* Save/Restore the entryhi register when are are preempted/scheduled back in */
|
||||
unsigned long preempt_entryhi;
|
||||
|
||||
|
@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
|
||||
struct mips_coproc *cop0 = vcpu->arch.cop0;
|
||||
enum emulation_result er = EMULATE_DONE;
|
||||
|
||||
if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
|
||||
if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
|
||||
kvm_clear_c0_guest_status(cop0, ST0_ERL);
|
||||
vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
|
||||
} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
|
||||
kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
|
||||
kvm_read_c0_guest_epc(cop0));
|
||||
kvm_clear_c0_guest_status(cop0, ST0_EXL);
|
||||
vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
|
||||
|
||||
} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
|
||||
kvm_clear_c0_guest_status(cop0, ST0_ERL);
|
||||
vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
|
||||
} else {
|
||||
kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
|
||||
vcpu->arch.pc);
|
||||
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
enum emulation_result er = EMULATE_DO_MMIO;
|
||||
unsigned long curr_pc;
|
||||
u32 op, rt;
|
||||
u32 bytes;
|
||||
|
||||
rt = inst.i_format.rt;
|
||||
op = inst.i_format.opcode;
|
||||
|
||||
vcpu->arch.pending_load_cause = cause;
|
||||
/*
|
||||
* Find the resume PC now while we have safe and easy access to the
|
||||
* prior branch instruction, and save it for
|
||||
* kvm_mips_complete_mmio_load() to restore later.
|
||||
*/
|
||||
curr_pc = vcpu->arch.pc;
|
||||
er = update_pc(vcpu, cause);
|
||||
if (er == EMULATE_FAIL)
|
||||
return er;
|
||||
vcpu->arch.io_pc = vcpu->arch.pc;
|
||||
vcpu->arch.pc = curr_pc;
|
||||
|
||||
vcpu->arch.io_gpr = rt;
|
||||
|
||||
switch (op) {
|
||||
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
|
||||
goto done;
|
||||
}
|
||||
|
||||
er = update_pc(vcpu, vcpu->arch.pending_load_cause);
|
||||
if (er == EMULATE_FAIL)
|
||||
return er;
|
||||
/* Restore saved resume PC */
|
||||
vcpu->arch.pc = vcpu->arch.io_pc;
|
||||
|
||||
switch (run->mmio.len) {
|
||||
case 4:
|
||||
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
|
||||
break;
|
||||
}
|
||||
|
||||
if (vcpu->arch.pending_load_cause & CAUSEF_BD)
|
||||
kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
|
||||
vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
|
||||
vcpu->mmio_needed);
|
||||
|
||||
done:
|
||||
return er;
|
||||
}
|
||||
|
@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
|
||||
static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct mips_coproc *cop0 = vcpu->arch.cop0;
|
||||
int cpu = smp_processor_id();
|
||||
int i, cpu = smp_processor_id();
|
||||
unsigned int gasid;
|
||||
|
||||
/*
|
||||
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
|
||||
vcpu);
|
||||
vcpu->arch.guest_user_asid[cpu] =
|
||||
vcpu->arch.guest_user_mm.context.asid[cpu];
|
||||
for_each_possible_cpu(i)
|
||||
if (i != cpu)
|
||||
vcpu->arch.guest_user_asid[cpu] = 0;
|
||||
vcpu->arch.last_user_gasid = gasid;
|
||||
}
|
||||
}
|
||||
|
@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
|
||||
if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
|
||||
asid_version_mask(cpu)) {
|
||||
u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
|
||||
KVM_ENTRYHI_ASID;
|
||||
|
||||
kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
|
||||
vcpu->arch.guest_user_asid[cpu] =
|
||||
vcpu->arch.guest_user_mm.context.asid[cpu];
|
||||
vcpu->arch.last_user_gasid = gasid;
|
||||
newasid++;
|
||||
|
||||
kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
|
||||
|
Reference in New Issue
Block a user