KVM: X86: implement the logic for spinlock optimization
get_cpl() requires vcpu_load, so we must cache the result (i.e. whether the vcpu was preempted while its CPL was 0) in kvm_vcpu_arch.

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:

committed by
Paolo Bonzini

parent
199b5763d3
commit
de63ad4cf4
@@ -3749,7 +3749,10 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
|
||||
|
||||
static int pause_interception(struct vcpu_svm *svm)
|
||||
{
|
||||
kvm_vcpu_on_spin(&svm->vcpu, false);
|
||||
struct kvm_vcpu *vcpu = &svm->vcpu;
|
||||
bool in_kernel = (svm_get_cpl(vcpu) == 0);
|
||||
|
||||
kvm_vcpu_on_spin(vcpu, in_kernel);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
Reference in New Issue
Block a user