KVM: x86: preparatory changes for APICv cleanups
Add return value to __kvm_apic_update_irr/kvm_apic_update_irr.
Move vmx_sync_pir_to_irr around.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -341,7 +341,7 @@ static int find_highest_vector(void *bitmap)
 	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
 		reg = bitmap + REG_POS(vec);
 		if (*reg)
-			return fls(*reg) - 1 + vec;
+			return __fls(*reg) + vec;
 	}
 
 	return -1;
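
The hunk above is a micro-optimization with identical semantics: kernel fls(x) returns the 1-based position of the most significant set bit (0 when x == 0), while __fls(x) returns the 0-based bit index and is undefined for x == 0. Because the preceding if (*reg) check guarantees a nonzero word, fls(*reg) - 1 and __fls(*reg) compute the same value, and __fls skips the zero test. A minimal user-space sketch of the equivalence, assuming 32-bit words and the GCC/Clang __builtin_clz builtin (fls_model/__fls_model are illustrative stand-ins, not the kernel helpers):

#include <assert.h>

/* fls(): 1-based index of the most significant set bit, 0 if x == 0. */
static int fls_model(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* __fls(): 0-based bit index; the caller must guarantee x != 0. */
static int __fls_model(unsigned int x)
{
	return 31 - __builtin_clz(x);
}

int main(void)
{
	/* For every nonzero word the two expressions agree. */
	for (unsigned int x = 1; x != 0; x <<= 1)
		assert(fls_model(x) - 1 == __fls_model(x));
	assert(fls_model(0x8000000fu) - 1 == 31 && __fls_model(0x8000000fu) == 31);
	return 0;
}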
@@ -361,27 +361,36 @@ static u8 count_vectors(void *bitmap)
 	return count;
 }
 
-void __kvm_apic_update_irr(u32 *pir, void *regs)
+int __kvm_apic_update_irr(u32 *pir, void *regs)
 {
-	u32 i, pir_val;
+	u32 i, vec;
+	u32 pir_val, irr_val;
+	int max_irr = -1;
 
-	for (i = 0; i <= 7; i++) {
+	for (i = vec = 0; i <= 7; i++, vec += 32) {
 		pir_val = READ_ONCE(pir[i]);
+		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
 		if (pir_val) {
-			pir_val = xchg(&pir[i], 0);
-			*((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
+			irr_val |= xchg(&pir[i], 0);
+			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
 		}
+		if (irr_val)
+			max_irr = __fls(irr_val) + vec;
 	}
+
+	return max_irr;
 }
 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
 
-void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
+	int max_irr;
 
-	__kvm_apic_update_irr(pir, apic->regs);
+	max_irr = __kvm_apic_update_irr(pir, apic->regs);
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	return max_irr;
 }
 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
 
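With this rewrite, __kvm_apic_update_irr() computes the highest pending vector while it merges the posted-interrupt requests (PIR) into the IRR, so callers get max_irr for free instead of rescanning the IRR afterwards; kvm_apic_update_irr() simply forwards the result. A user-space sketch of the same merge-and-track control flow, with plain arrays standing in for the PIR and the APIC register page, and GCC __atomic builtins standing in for READ_ONCE()/xchg() (all names here are illustrative, not kernel code):

#include <stdio.h>

/* Model of __fls() on a 32-bit word (GCC/Clang builtin). */
static int fls_bit(unsigned int x)
{
	return 31 - __builtin_clz(x);
}

/*
 * Merge pending bits from pir[0..7] into irr[0..7] and return the
 * highest pending vector, or -1 if none -- mirroring the new
 * __kvm_apic_update_irr() control flow.
 */
static int apic_update_irr_model(unsigned int *pir, unsigned int *irr)
{
	int max_irr = -1;
	unsigned int i, vec;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		unsigned int pir_val = __atomic_load_n(&pir[i], __ATOMIC_RELAXED);
		unsigned int irr_val = irr[i];

		if (pir_val) {
			/* Claim the pending bits atomically, like xchg(&pir[i], 0). */
			irr_val |= __atomic_exchange_n(&pir[i], 0, __ATOMIC_SEQ_CST);
			irr[i] = irr_val;
		}
		if (irr_val)
			max_irr = fls_bit(irr_val) + vec;
	}
	return max_irr;
}

int main(void)
{
	unsigned int pir[8] = { 0 }, irr[8] = { 0 };

	irr[0] = 1u << 3;	/* vector 3 already pending in the IRR */
	pir[1] = 1u << 5;	/* vector 37 newly posted in the PIR  */
	printf("max_irr = %d\n", apic_update_irr_model(pir, irr));	/* prints 37 */
	return 0;
}
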
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -71,8 +71,8 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
 			 int short_hand, unsigned int dest, int dest_mode);
 
-void __kvm_apic_update_irr(u32 *pir, void *regs);
-void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
+int __kvm_apic_update_irr(u32 *pir, void *regs);
+int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 		     struct dest_map *dest_map);
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5057,22 +5057,6 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 	kvm_vcpu_kick(vcpu);
 }
 
-static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-	if (!pi_test_on(&vmx->pi_desc))
-		return;
-
-	pi_clear_on(&vmx->pi_desc);
-	/*
-	 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
-	 * But on x86 this is just a compiler barrier anyway.
-	 */
-	smp_mb__after_atomic();
-	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
-}
-
 /*
  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  * will not change in the lifetime of the guest.
@@ -8738,6 +8722,22 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
 	}
 }
 
+static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!pi_test_on(&vmx->pi_desc))
+		return;
+
+	pi_clear_on(&vmx->pi_desc);
+	/*
+	 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+	 * But on x86 this is just a compiler barrier anyway.
+	 */
+	smp_mb__after_atomic();
+	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
 	if (!kvm_vcpu_apicv_active(vcpu))
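The vmx.c half of the patch is pure code movement, as the commit message says: vmx_sync_pir_to_irr() is relocated, unchanged, from beside vmx_deliver_posted_interrupt() to beside vmx_hwapic_irr_update(), where the follow-up APICv cleanups will want it. The ordering described by the function's comment is the subtle part: ON is cleared before the PIR is scanned, so a vector posted concurrently re-sets ON and triggers another sync rather than being lost, and since the IOMMU can write PIR.ON the barrier matters even on a uniprocessor host. A toy C11 model of that handshake (descriptor layout, names, and memory orders are illustrative assumptions, not the kernel's implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy posted-interrupt descriptor: 256 request bits plus a notification bit. */
struct pi_desc_model {
	_Atomic unsigned int pir[8];
	atomic_bool on;
};

/* Sender (CPU or IOMMU): post a vector, then publish it by setting ON. */
static void post_interrupt(struct pi_desc_model *pi, int vector)
{
	atomic_fetch_or_explicit(&pi->pir[vector / 32], 1u << (vector % 32),
				 memory_order_relaxed);
	atomic_store_explicit(&pi->on, true, memory_order_release);
}

/* Consumer, mirroring vmx_sync_pir_to_irr()'s ordering: clear ON first,
 * then scan the PIR.  The fence stands in for smp_mb__after_atomic(); it
 * keeps the PIR reads from being hoisted above the clearing of ON, so a
 * vector posted concurrently re-sets ON and is seen on a later pass.
 */
static void sync_pir(struct pi_desc_model *pi, unsigned int *irr)
{
	if (!atomic_load_explicit(&pi->on, memory_order_acquire))
		return;

	atomic_store_explicit(&pi->on, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);

	for (int i = 0; i <= 7; i++)
		irr[i] |= atomic_exchange_explicit(&pi->pir[i], 0,
						   memory_order_relaxed);
}

int main(void)
{
	struct pi_desc_model pi = { 0 };
	unsigned int irr[8] = { 0 };

	post_interrupt(&pi, 37);
	sync_pir(&pi, irr);
	printf("irr[1] = %#x\n", irr[1]);	/* 0x20: bit 5 of word 1 */
	return 0;
}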