Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "First batch of KVM changes for 4.1

  The most interesting bit here is irqfd/ioeventfd support for ARM and
  ARM64.

  Summary:

   ARM/ARM64:
      fixes for live migration, irqfd and ioeventfd support (enabling
      vhost, too), page aging

   s390:
      interrupt handling rework, allowing to inject all local interrupts
      via new ioctl and to get/set the full local irq state for migration
      and introspection.  New ioctls to access memory by virtual address,
      and to get/set the guest storage keys.  SIMD support.

   MIPS:
      FPU and MIPS SIMD Architecture (MSA) support.  Includes some
      patches from Ralf Baechle's MIPS tree.

   x86:
      bugfixes (notably for pvclock, the others are small) and cleanups.
      Another small latency improvement for the TSC deadline timer"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
  KVM: use slowpath for cross page cached accesses
  kvm: mmu: lazy collapse small sptes into large sptes
  KVM: x86: Clear CR2 on VCPU reset
  KVM: x86: DR0-DR3 are not clear on reset
  KVM: x86: BSP in MSR_IA32_APICBASE is writable
  KVM: x86: simplify kvm_apic_map
  KVM: x86: avoid logical_map when it is invalid
  KVM: x86: fix mixed APIC mode broadcast
  KVM: x86: use MDA for interrupt matching
  kvm/ppc/mpic: drop unused IRQ_testbit
  KVM: nVMX: remove unnecessary double caching of MAXPHYADDR
  KVM: nVMX: checks for address bits beyond MAXPHYADDR on VM-entry
  KVM: x86: cache maxphyaddr CPUID leaf in struct kvm_vcpu
  KVM: vmx: pass error code with internal error #2
  x86: vdso: fix pvclock races with task migration
  KVM: remove kvm_read_hva and kvm_read_hva_atomic
  KVM: x86: optimize delivery of TSC deadline timer interrupt
  KVM: x86: extract blocking logic from __vcpu_run
  kvm: x86: fix x86 eflags fixed bit
  KVM: s390: migrate vcpu interrupt state
  ...
@@ -133,6 +133,28 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}

/* The logical map is definitely wrong if we have multiple
* modes at the same time. (Physical map is always right.)
*/
static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
{
return !(map->mode & (map->mode - 1));
}
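The check above works because map->mode is a bitmask: the recalculate_apic_map() hunk below ORs in one KVM_APIC_MODE_* bit for each APIC it visits, so more than one set bit means the guest's APICs are in mixed modes and the logical map cannot be trusted. A minimal, self-contained sketch of the same power-of-two test (user-space harness for illustration only; the constants are copied from the BUILD_BUG_ON()s further down):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mode bits, values mirrored from the BUILD_BUG_ON()s in the diff. */
    #define KVM_APIC_MODE_XAPIC_CLUSTER 4
    #define KVM_APIC_MODE_XAPIC_FLAT    8
    #define KVM_APIC_MODE_X2APIC        16

    /* Same test as kvm_apic_logical_map_valid(): true iff zero or one mode bit is set. */
    static bool logical_map_valid(unsigned int mode)
    {
        return !(mode & (mode - 1));
    }

    int main(void)
    {
        unsigned int flat_only = KVM_APIC_MODE_XAPIC_FLAT;
        unsigned int mixed = KVM_APIC_MODE_XAPIC_FLAT | KVM_APIC_MODE_X2APIC;

        printf("flat only -> %d\n", logical_map_valid(flat_only)); /* 1: map usable */
        printf("mixed     -> %d\n", logical_map_valid(mixed));     /* 0: map invalid */
        return 0;
    }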

static inline void
apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
{
unsigned lid_bits;

BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
lid_bits = map->mode;

*cid = dest_id >> lid_bits;
*lid = dest_id & ((1 << lid_bits) - 1);
}
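The BUILD_BUG_ON()s pin down the trick this helper depends on: each KVM_APIC_MODE_* value doubles as the width of the logical-ID field in that mode (4 bits per cluster in xAPIC cluster mode, 8 in xAPIC flat mode, 16 in x2APIC), so with exactly one mode bit set, map->mode itself is the shift count. A user-space sketch of the same split (illustrative only, not kernel code):

    #include <stdio.h>

    #define KVM_APIC_MODE_XAPIC_CLUSTER 4
    #define KVM_APIC_MODE_XAPIC_FLAT    8
    #define KVM_APIC_MODE_X2APIC        16

    /* Mirrors apic_logical_id(): the mode value is the number of logical-ID bits. */
    static void split_dest(unsigned int mode, unsigned int dest_id,
                           unsigned int *cid, unsigned int *lid)
    {
        unsigned int lid_bits = mode;             /* valid only with one mode bit set */

        *cid = dest_id >> lid_bits;               /* cluster ID */
        *lid = dest_id & ((1u << lid_bits) - 1);  /* per-cluster bitmask */
    }

    int main(void)
    {
        unsigned int cid, lid;

        /* xAPIC cluster mode: LDR 0x23 -> cluster 2, CPUs 0 and 1 of that cluster. */
        split_dest(KVM_APIC_MODE_XAPIC_CLUSTER, 0x23, &cid, &lid);
        printf("cluster: cid=%u lid=0x%x\n", cid, lid);

        /* x2APIC: logical ID 0x00010004 -> cluster 1, CPU bit 2. */
        split_dest(KVM_APIC_MODE_X2APIC, 0x00010004, &cid, &lid);
        printf("x2apic:  cid=%u lid=0x%x\n", cid, lid);
        return 0;
    }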

static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -146,48 +168,6 @@ static void recalculate_apic_map(struct kvm *kvm)
if (!new)
goto out;

new->ldr_bits = 8;
/* flat mode is default */
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
new->broadcast = APIC_BROADCAST;

kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;

if (!kvm_apic_present(vcpu))
continue;

if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
new->cid_mask = new->lid_mask = 0xffff;
new->broadcast = X2APIC_BROADCAST;
} else if (kvm_apic_get_reg(apic, APIC_LDR)) {
if (kvm_apic_get_reg(apic, APIC_DFR) ==
APIC_DFR_CLUSTER) {
new->cid_shift = 4;
new->cid_mask = 0xf;
new->lid_mask = 0xf;
} else {
new->cid_shift = 8;
new->cid_mask = 0;
new->lid_mask = 0xff;
}
}

/*
* All APICs have to be configured in the same mode by an OS.
* We take advantage of this while building the logical id lookup
* table. After reset APICs are in software disabled mode, so if
* we find an apic with a different setting we assume this is the mode
* the OS wants all apics to be in; build lookup table accordingly.
*/
if (kvm_apic_sw_enabled(apic))
break;
}

kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
u16 cid, lid;
@@ -198,11 +178,25 @@ static void recalculate_apic_map(struct kvm *kvm)

aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
lid = apic_logical_id(new, ldr);

if (aid < ARRAY_SIZE(new->phys_map))
new->phys_map[aid] = apic;

if (apic_x2apic_mode(apic)) {
new->mode |= KVM_APIC_MODE_X2APIC;
} else if (ldr) {
ldr = GET_APIC_LOGICAL_ID(ldr);
if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
else
new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
}

if (!kvm_apic_logical_map_valid(new))
continue;

apic_logical_id(new, ldr, &cid, &lid);

if (lid && cid < ARRAY_SIZE(new->logical_map))
new->logical_map[cid][ffs(lid) - 1] = apic;
}
@@ -588,15 +582,23 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
return dest == (apic_x2apic_mode(apic) ?
X2APIC_BROADCAST : APIC_BROADCAST);
if (apic_x2apic_mode(apic))
return mda == X2APIC_BROADCAST;

return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
if (kvm_apic_broadcast(apic, mda))
return true;

if (apic_x2apic_mode(apic))
return mda == kvm_apic_id(apic);

return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
@@ -613,6 +615,7 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
&& (logical_id & mda & 0xffff) != 0;

logical_id = GET_APIC_LOGICAL_ID(logical_id);
mda = GET_APIC_DEST_FIELD(mda);

switch (kvm_apic_get_reg(apic, APIC_DFR)) {
case APIC_DFR_FLAT:
@@ -627,10 +630,27 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
}
}

/* KVM APIC implementation has two quirks
* - dest always begins at 0 while xAPIC MDA has offset 24,
* - IOxAPIC messages have to be delivered (directly) to x2APIC.
*/
static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
struct kvm_lapic *target)
{
bool ipi = source != NULL;
bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);

if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
return X2APIC_BROADCAST;

return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
}
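kvm_apic_mda() exists because of the first quirk listed in the comment above: KVM keeps xAPIC IDs and destinations in bits 7:0, while a real xAPIC MDA carries the destination in bits 31:24, so the matching helpers convert with SET_APIC_DEST_FIELD()/GET_APIC_DEST_FIELD(). A small user-space sketch of that conversion (the macro bodies are reproduced here in simplified form and the broadcast values mirror the KVM lapic code; illustration only):

    #include <stdio.h>

    /* Simplified forms of the apicdef.h helpers used in the hunk above. */
    #define SET_APIC_DEST_FIELD(x) ((x) << 24)
    #define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)

    /* Broadcast destinations as used by the KVM lapic code. */
    #define APIC_BROADCAST   0xFFu
    #define X2APIC_BROADCAST 0xFFFFFFFFu

    int main(void)
    {
        unsigned int dest_id = 0x05;   /* destination as an I/O APIC or IPI supplies it */
        unsigned int xapic_mda = SET_APIC_DEST_FIELD(dest_id);

        /* xAPIC: the destination lives in MDA bits 31:24. */
        printf("xAPIC MDA  = 0x%08x\n", xapic_mda);                      /* 0x05000000 */
        printf("round trip = 0x%02x\n", GET_APIC_DEST_FIELD(xapic_mda)); /* 0x05 */

        /* x2APIC: the 32-bit destination is used as-is, and broadcast widens too. */
        printf("broadcast: xAPIC=0x%02x, x2APIC=0x%08x\n",
               APIC_BROADCAST, X2APIC_BROADCAST);
        return 0;
    }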

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode)
{
struct kvm_lapic *target = vcpu->arch.apic;
u32 mda = kvm_apic_mda(dest, source, target);

apic_debug("target %p, source %p, dest 0x%x, "
"dest_mode 0x%x, short_hand 0x%x\n",
@@ -640,9 +660,9 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
switch (short_hand) {
case APIC_DEST_NOSHORT:
if (dest_mode == APIC_DEST_PHYSICAL)
return kvm_apic_match_physical_addr(target, dest);
return kvm_apic_match_physical_addr(target, mda);
else
return kvm_apic_match_logical_addr(target, dest);
return kvm_apic_match_logical_addr(target, mda);
case APIC_DEST_SELF:
return target == source;
case APIC_DEST_ALLINC:
@@ -664,6 +684,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic **dst;
int i;
bool ret = false;
bool x2apic_ipi = src && apic_x2apic_mode(src);

*r = -1;

@@ -675,15 +696,15 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
if (irq->shorthand)
return false;

if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
return false;

rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);

if (!map)
goto out;

if (irq->dest_id == map->broadcast)
goto out;

ret = true;

if (irq->dest_mode == APIC_DEST_PHYSICAL) {
@@ -692,16 +713,20 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,

dst = &map->phys_map[irq->dest_id];
} else {
u32 mda = irq->dest_id << (32 - map->ldr_bits);
u16 cid = apic_cluster_id(map, mda);
u16 cid;

if (!kvm_apic_logical_map_valid(map)) {
ret = false;
goto out;
}

apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);

if (cid >= ARRAY_SIZE(map->logical_map))
goto out;

dst = map->logical_map[cid];

bitmap = apic_logical_id(map, mda);

if (irq->delivery_mode == APIC_DM_LOWEST) {
int l = -1;
for_each_set_bit(i, &bitmap, 16) {
@@ -1037,7 +1062,7 @@ static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_io_device *this,
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, void *data)
{
struct kvm_lapic *apic = to_lapic(this);
@@ -1357,7 +1382,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
return ret;
}

static int apic_mmio_write(struct kvm_io_device *this,
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
gpa_t address, int len, const void *data)
{
struct kvm_lapic *apic = to_lapic(this);
@@ -1497,8 +1522,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}

if (!kvm_vcpu_is_bsp(apic->vcpu))
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;

/* update jump label if enable bit changes */