Merge branch 'kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm-queue
Conflicts:
	arch/powerpc/kvm/book3s_hv_rmhandlers.S
	arch/powerpc/kvm/booke.c
@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
@@ -682,34 +682,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;
#ifdef CONFIG_PPC_FPU
	struct thread_fp_state fp;
	int fpexc_mode;
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	fp = current->thread.fp_state;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
	       sizeof(vcpu->arch.fpr));
	current->thread.fp_state.fpscr = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -728,6 +716,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.shadow_dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -743,15 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
	       sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fp_state.fpscr;

	/* Restore userspace FPU state from stack */
	current->thread.fp_state = fp;
	current->thread.fpexc_mode = fpexc_mode;
#endif

out:
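Taken together, the two CONFIG_PPC_FPU hunks above bracket __kvmppc_vcpu_run() with a manual save/switch/restore of floating-point state: park the userspace FP registers on the stack, install the guest's FP state into current->thread, run the guest, copy the guest state back out, then put the userspace state back. The sketch below is illustrative only, condensed from the lines shown above; the helper name run_guest_with_fp() is invented for the example, and the surrounding fpu_active handling is omitted.

#ifdef CONFIG_PPC_FPU
/* Illustrative sketch only, condensed from the hunks above. */
static int run_guest_with_fp(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct thread_fp_state fp;	/* userspace FP state parked on the stack */
	int fpexc_mode;
	int ret;

	/* Save userspace FPU state */
	enable_kernel_fp();
	fp = current->thread.fp_state;
	fpexc_mode = current->thread.fpexc_mode;

	/* Install guest FPU state into the thread */
	memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
	       sizeof(vcpu->arch.fpr));
	current->thread.fp_state.fpscr = vcpu->arch.fpscr;

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* Copy guest FPU state back out of the thread */
	kvmppc_save_guest_fp(vcpu);
	memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
	       sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fp_state.fpscr;

	/* Restore userspace FPU state */
	current->thread.fp_state = fp;
	current->thread.fpexc_mode = fpexc_mode;

	return ret;
}
#endif

The hunk headers show the merged function shrinking (34 lines to 22, 15 to 6), which suggests it is these explicit copies that go away in the merged result.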
@@ -898,17 +878,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	int s;
	int idx;

#ifdef CONFIG_PPC64
	WARN_ON(local_paca->irq_happened != 0);
#endif

	/*
	 * We enter with interrupts disabled in hardware, but
	 * we need to call hard_irq_disable anyway to ensure that
	 * the software state is kept in sync.
	 */
	hard_irq_disable();

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

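The comment in the hunk above is about 64-bit PowerPC's lazy interrupt disabling: at this point MSR[EE] is already clear in hardware (the guest-exit path cleared it), but the kernel's per-CPU software state may still record interrupts as enabled, so hard_irq_disable() is called to bring the bookkeeping in line before anything that consults irqs_disabled() runs. A minimal sketch of that prologue shape, illustrative only (example_exit_prologue() is an invented name):

/*
 * Illustrative only: the prologue shape of an exit handler that is
 * entered with external interrupts already off in hardware.
 */
static void example_exit_prologue(void)
{
#ifdef CONFIG_PPC64
	/* No interrupt should have been latched for lazy replay yet. */
	WARN_ON(local_paca->irq_happened != 0);
#endif
	/*
	 * MSR[EE] is already 0, but the software irq-disable state may not
	 * reflect that; hard_irq_disable() records it so that
	 * irqs_disabled() and lockdep agree with reality from here on.
	 */
	hard_irq_disable();
}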
@@ -1217,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		} else {
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}
	}
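In the last hunk, a failed kvmppc_prepare_to_enter() (s <= 0, e.g. a pending signal) is folded into the handler's resume value as r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV): the low two bits carry the resume flags, and the remaining bits carry the host-visible code, recoverable later with an arithmetic shift right by two. The flag values below (RESUME_FLAG_NV = 1 << 0, RESUME_FLAG_HOST = 1 << 1) are an assumption based on arch/powerpc/include/asm/kvm_asm.h; the standalone demo just walks through the packing and unpacking arithmetic.

#include <errno.h>
#include <stdio.h>

/* Assumed flag values (see arch/powerpc/include/asm/kvm_asm.h). */
#define RESUME_FLAG_NV   (1 << 0)	/* reload guest non-volatile registers */
#define RESUME_FLAG_HOST (1 << 1)	/* exit to the host */
#define RESUME_HOST      RESUME_FLAG_HOST

int main(void)
{
	int r = RESUME_FLAG_NV;	/* pretend the handler already wants an NV reload */
	int s = -EINTR;		/* kvmppc_prepare_to_enter() failed: signal pending */

	/* The packing used in the hunk above. */
	r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);

	printf("exit to host?   %d\n", !!(r & RESUME_FLAG_HOST));	/* 1 */
	printf("reload NV regs? %d\n", !!(r & RESUME_FLAG_NV));		/* 1 */
	printf("host code:      %d\n", r >> 2);				/* -4, i.e. -EINTR */
	return 0;
}

Left-shifting a negative value relies on the usual two's-complement behaviour the kernel assumes, which is why r >> 2 gives back the original negative error code.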