Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "One of the largest releases for KVM... Hardly any generic changes,
  but lots of architecture-specific updates.

  ARM:
   - VHE support so that we can run the kernel at EL2 on ARMv8.1 systems
   - PMU support for guests
   - 32bit world switch rewritten in C
   - various optimizations to the vgic save/restore code

  PPC:
   - enabled KVM-VFIO integration ("VFIO device")
   - optimizations to speed up IPIs between vcpus
   - in-kernel handling of IOMMU hypercalls
   - support for dynamic DMA windows (DDW)

  s390:
   - provide the floating point registers via sync regs
   - separated instruction vs. data accesses
   - dirty log improvements for huge guests
   - bugfixes and documentation improvements

  x86:
   - Hyper-V VMBus hypercall userspace exit
   - alternative implementation of lowest-priority interrupts using
     vector hashing (for better VT-d posted interrupt support)
   - fixed guest debugging with nested virtualization
   - improved interrupt tracking in the in-kernel IOAPIC
   - generic infrastructure for tracking writes to guest memory -
     currently its only use is to speed up the legacy shadow paging
     (pre-EPT) case, but in the future it will be used for virtual GPUs
     as well
   - much cleanup (LAPIC, kvmclock, MMU, PIT), including ubsan fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (217 commits)
  KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
  KVM: x86: disable MPX if host did not enable MPX XSAVE features
  arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit
  arm64: KVM: vgic-v3: Reset LRs at boot time
  arm64: KVM: vgic-v3: Do not save an LR known to be empty
  arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
  arm64: KVM: vgic-v3: Avoid accessing ICH registers
  KVM: arm/arm64: vgic-v2: Make GICD_SGIR quicker to hit
  KVM: arm/arm64: vgic-v2: Only wipe LRs on vcpu exit
  KVM: arm/arm64: vgic-v2: Reset LRs at boot time
  KVM: arm/arm64: vgic-v2: Do not save an LR known to be empty
  KVM: arm/arm64: vgic-v2: Move GICH_ELRSR saving to its own function
  KVM: arm/arm64: vgic-v2: Save maintenance interrupt state only if required
  KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
  KVM: s390: allocate only one DMA page per VM
  KVM: s390: enable STFLE interpretation only if enabled for the guest
  KVM: s390: wake up when the VCPU cpu timer expires
  KVM: s390: step the VCPU timer while in enabled wait
  KVM: s390: protect VCPU cpu timer with a seqcount
  KVM: s390: step VCPU cpu timer during kvm_run ioctl
  ...
virt/kvm/arm/arch_timer.c

@@ -34,6 +34,11 @@ static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;

void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->arch.timer_cpu.active_cleared_last = false;
}

static cycle_t kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);

@@ -130,6 +135,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)

	BUG_ON(!vgic_initialized(vcpu->kvm));

	timer->active_cleared_last = false;
	timer->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
				   timer->irq.level);

@@ -245,10 +251,35 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
	else
		phys_active = false;

	/*
	 * We want to avoid hitting the (re)distributor as much as
	 * possible, as this is a potentially expensive MMIO access
	 * (not to mention locks in the irq layer), and a solution for
	 * this is to cache the "active" state in memory.
	 *
	 * Things to consider: we cannot cache an "active set" state,
	 * because the HW can change this behind our back (it becomes
	 * "clear" in the HW). We must then restrict the caching to
	 * the "clear" state.
	 *
	 * The cache is invalidated on:
	 * - vcpu put, indicating that the HW cannot be trusted to be
	 *   in a sane state on the next vcpu load,
	 * - any change in the interrupt state
	 *
	 * Usage conditions:
	 * - cached value is "active clear"
	 * - value to be programmed is "active clear"
	 */
	if (timer->active_cleared_last && !phys_active)
		return;

	ret = irq_set_irqchip_state(timer->map->irq,
				    IRQCHIP_STATE_ACTIVE,
				    phys_active);
	WARN_ON(ret);

	timer->active_cleared_last = !phys_active;
}

/**
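The comment in kvm_timer_flush_hwstate() boils down to a small decision rule: the distributor write may be skipped only when the last programmed state was "active clear" and the state to program is again "active clear". A stand-alone model of that rule (hypothetical names, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Skip the expensive irq_set_irqchip_state() MMIO access only when the
 * cached state and the requested state are both "active clear". */
static bool needs_mmio(bool active_cleared_last, bool phys_active)
{
	return !(active_cleared_last && !phys_active);
}

int main(void)
{
	for (int cached = 0; cached <= 1; cached++)
		for (int want = 0; want <= 1; want++)
			printf("cached_clear=%d phys_active=%d -> mmio=%d\n",
			       cached, want, needs_mmio(cached, want));
	return 0;
}

Exactly one of the four combinations avoids the MMIO access, which is why the cache is only ever allowed to represent the "clear" state.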
virt/kvm/arm/hyp/timer-sr.c (new file, 69 lines)
@@ -0,0 +1,69 @@
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <clocksource/arm_arch_timer.h>
#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kvm_hyp.h>

/* vcpu is already in the HYP VA space */
void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 val;

	if (kvm->arch.timer.enabled) {
		timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
		timer->cntv_cval = read_sysreg_el0(cntv_cval);
	}

	/* Disable the virtual timer */
	write_sysreg_el0(0, cntv_ctl);

	/* Allow physical timer/counter access for the host */
	val = read_sysreg(cnthctl_el2);
	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
	write_sysreg(val, cnthctl_el2);

	/* Clear cntvoff for the host */
	write_sysreg(0, cntvoff_el2);
}

void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 val;

	/*
	 * Disallow physical timer access for the guest
	 * Physical counter access is allowed
	 */
	val = read_sysreg(cnthctl_el2);
	val &= ~CNTHCTL_EL1PCEN;
	val |= CNTHCTL_EL1PCTEN;
	write_sysreg(val, cnthctl_el2);

	if (kvm->arch.timer.enabled) {
		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
		write_sysreg_el0(timer->cntv_cval, cntv_cval);
		isb();
		write_sysreg_el0(timer->cntv_ctl, cntv_ctl);
	}
}
virt/kvm/arm/hyp/vgic-v2-sr.c (new file, 170 lines)
@@ -0,0 +1,170 @@
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_hyp.h>

static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
					    void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
	u32 eisr0, eisr1;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & GICH_HCR_UIE);

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & GICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & GICH_LR_EOI));
	}

	if (expect_mi) {
		cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);

		if (cpu_if->vgic_misr & GICH_MISR_EOI) {
			eisr0 = readl_relaxed(base + GICH_EISR0);
			if (unlikely(nr_lr > 32))
				eisr1 = readl_relaxed(base + GICH_EISR1);
			else
				eisr1 = 0;
		} else {
			eisr0 = eisr1 = 0;
		}
	} else {
		cpu_if->vgic_misr = 0;
		eisr0 = eisr1 = 0;
	}

#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
#else
	cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
#endif
}

static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
	u32 elrsr0, elrsr1;

	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(nr_lr > 32))
		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
	else
		elrsr1 = 0;

#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
#else
	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
#endif
}

static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
	int i;

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		if (cpu_if->vgic_elrsr & (1UL << i)) {
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
			continue;
		}

		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);

	if (!base)
		return;

	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);

		save_maint_int_state(vcpu, base);
		save_elrsr(vcpu, base);
		save_lrs(vcpu, base);

		writel_relaxed(0, base + GICH_HCR);

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = ~0UL;
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_apr = 0;
	}
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	int i, nr_lr;
	u64 live_lrs = 0;

	if (!base)
		return;

	nr_lr = vcpu->arch.vgic_cpu.nr_lr;

	for (i = 0; i < nr_lr; i++)
		if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
			live_lrs |= 1UL << i;

	if (live_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
		for (i = 0; i < nr_lr; i++) {
			if (!(live_lrs & (1UL << i)))
				continue;

			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}

	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
}
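Both functions above pivot on the live_lrs bitmap: the save path only reads back list registers that were handed to the hardware, and the restore path recomputes the set from the saved LR state bits so empty slots are never written. A stand-alone model of the restore-side bookkeeping (hypothetical, user-space):

#include <stdint.h>
#include <stdio.h>

#define NR_LR 4
#define LR_STATE_MASK 0x3u /* pending/active bits, like GICH_LR_STATE */

/* Only list registers that actually carry state would be programmed,
 * saving one MMIO write per empty slot. */
static uint64_t compute_live_lrs(const uint32_t lr[NR_LR])
{
	uint64_t live = 0;

	for (int i = 0; i < NR_LR; i++)
		if (lr[i] & LR_STATE_MASK)
			live |= 1ULL << i;
	return live;
}

int main(void)
{
	uint32_t lr[NR_LR] = { 0x1, 0x0, 0x2, 0x0 };

	printf("live_lrs = %#llx\n", /* 0x5: only LR0 and LR2 get written */
	       (unsigned long long)compute_live_lrs(lr));
	return 0;
}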
virt/kvm/arm/pmu.c (new file, 529 lines)
@@ -0,0 +1,529 @@
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	counter = vcpu_sys_reg(vcpu, reg);

	/* The real counter value is equal to the value of counter register plus
	 * the value perf event counts.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter & pmc->bitmask;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
}
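Note that kvm_pmu_set_counter_value() never stores val directly: the sysreg only keeps an offset, and a read adds whatever the backing perf event has counted since. Adjusting the offset by the signed difference makes the next read return val. A stand-alone model of the pair (hypothetical, no kernel APIs):

#include <stdint.h>
#include <stdio.h>

static uint64_t sysreg;     /* models vcpu_sys_reg(vcpu, reg)          */
static uint64_t perf_count; /* models perf_event_read_value()'s result */
static const uint64_t bitmask = 0xffffffffULL; /* 32-bit counter */

static uint64_t get_counter(void)
{
	return (sysreg + perf_count) & bitmask;
}

static void set_counter(uint64_t val)
{
	/* same trick as above: reg += (s64)val - current reading */
	sysreg += (int64_t)val - get_counter();
}

int main(void)
{
	perf_count = 1000; /* the perf event already counted 1000 */
	set_counter(42);   /* guest writes 42 */
	printf("%llu\n", (unsigned long long)get_counter()); /* 42 */
	return 0;
}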
/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	if (pmc->perf_event) {
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
		pmu->pmc[i].idx = i;
		pmu->pmc[i].bitmask = 0xffffffffUL;
	}
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->pmc[i];

		if (pmc->perf_event) {
			perf_event_disable(pmc->perf_event);
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
	}
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
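kvm_pmu_valid_counter_mask() turns the counter population into a bitmap: the PMCR_EL0.N event counters occupy the low bits, and the cycle counter always contributes its fixed slot. A worked example (assuming ARMV8_PMU_CYCLE_IDX is 31, the architectural cycle-counter index):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)        (1ULL << (n))
#define GENMASK(h, l) ((~0ULL >> (63 - (h))) & ~((1ULL << (l)) - 1))

int main(void)
{
	unsigned int n = 6; /* PMCR_EL0.N: six event counters implemented */
	uint64_t mask = n ? (GENMASK(n - 1, 0) | BIT(31)) : BIT(31);

	printf("%#llx\n", (unsigned long long)mask); /* 0x8000003f */
	return 0;
}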
/**
 * kvm_pmu_enable_counter - enable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter - disable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	reg &= kvm_pmu_valid_counter_mask(vcpu);

	return reg;
}

/**
 * kvm_pmu_overflow_set - set PMU overflow interrupt
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMOVSSET register
 */
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{
	u64 reg;

	if (val == 0)
		return;

	vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
	reg = kvm_pmu_overflow_status(vcpu);
	if (reg != 0)
		kvm_vcpu_kick(vcpu);
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level != overflow) {
		pmu->irq_level = overflow;
		kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
				    pmu->irq_num, overflow);
	}
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}
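kvm_pmc_to_vcpu() climbs from an array element back to its owner in two moves: "pmc -= pmc->idx" rewinds the pointer to element 0 of the embedded pmc[] array (each counter stores its own index for exactly this purpose), and container_of() arithmetic then peels off the enclosing structures. A stand-alone demonstration with hypothetical structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmc { int idx; };
struct pmu { struct pmc pmc[4]; };
struct vcpu { int id; struct pmu pmu; };

int main(void)
{
	struct vcpu vcpu = { .id = 7 };
	int i;

	for (i = 0; i < 4; i++)
		vcpu.pmu.pmc[i].idx = i;

	struct pmc *pmc = &vcpu.pmu.pmc[2]; /* what a handler receives */

	pmc -= pmc->idx; /* back to &pmu.pmc[0] */
	struct pmu *pmu = container_of(pmc, struct pmu, pmc[0]);
	struct vcpu *owner = container_of(pmu, struct vcpu, pmu);

	printf("%d\n", owner->id); /* 7 */
	return 0;
}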
/**
 * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	kvm_pmu_overflow_set(vcpu, BIT(idx));
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
		    && (enable & BIT(i))) {
			reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				kvm_pmu_overflow_set(vcpu, BIT(i));
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter(vcpu,
				       vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

	if (val & ARMV8_PMU_PMCR_LC) {
		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
		pmc->bitmask = 0xffffffffffffffffUL;
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
 * event with given hardware event number. Here we call perf_event API to
 * emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter;

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
	/* The initial sample period (overflow count) of an event. */
	attr.sample_period = (-counter) & pmc->bitmask;

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}
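The attr.sample_period line is how counter overflow is emulated on top of perf: the event is programmed to fire after exactly as many increments as remain before the emulated counter would wrap, at which point kvm_pmu_perf_overflow() sets the overflow bit. Worked arithmetic (assuming the 32-bit bitmask set in kvm_pmu_vcpu_reset()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bitmask = 0xffffffffULL; /* 32-bit counter width */
	uint64_t counter = 0xfffffff0ULL; /* current guest counter */

	/* attr.sample_period = (-counter) & pmc->bitmask */
	uint64_t period = (0 - counter) & bitmask;

	printf("%llu\n", (unsigned long long)period); /* 16 events until wrap */
	return 0;
}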
bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This could ensure the presence of
	 * a physical PMU and CONFIG_PERF_EVENT is selected.
	 */
	return (perf_num_counters() > 0);
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
	    !kvm_arm_pmu_irq_initialized(vcpu))
		return -ENXIO;

	if (kvm_arm_pmu_v3_ready(vcpu))
		return -EBUSY;

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (is_ppi) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/*
		 * The PMU overflow interrupt could be a PPI or SPI, but for one
		 * VM the interrupt type must be same for each vcpu. As a PPI,
		 * the interrupt number is the same for all vcpus, while as an
		 * SPI it must be a separate number per vcpu.
		 */
		if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs ||
		    !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}
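These three handlers are reached through the vcpu device-attribute ioctls, so enabling the PMU is a two-step dance for the VMM: point the overflow line at an interrupt, then initialize. A sketch of the userspace side (assumes an arm64 vcpu fd created with the KVM_ARM_VCPU_PMU_V3 feature bit and the KVM_ARM_VCPU_PMU_V3_CTRL attribute group; error handling elided):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_enable_pmu(int vcpu_fd)
{
	int irq = 23; /* example PPI; must satisfy irq_is_valid() above */
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr  = (__u64)(unsigned long)&irq,
	};

	/* Step 1: wire the overflow interrupt (rejected after init) */
	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	/* Step 2: KVM_ARM_VCPU_PMU_V3_INIT flips pmu.ready */
	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}

The ordering matters: kvm_arm_pmu_v3_init() fails with -ENXIO until the IRQ attribute has been set, and once init succeeds further IRQ writes fail with -EBUSY.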
virt/kvm/arm/vgic-v2-emul.c

@@ -320,6 +320,11 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
}

static const struct vgic_io_range vgic_dist_ranges[] = {
+	{
+		.base = GIC_DIST_SOFTINT,
+		.len = 4,
+		.handle_mmio = handle_mmio_sgi_reg,
+	},
	{
		.base = GIC_DIST_CTRL,
		.len = 12,

@@ -386,11 +391,6 @@ static const struct vgic_io_range vgic_dist_ranges[] = {
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_cfg_reg,
	},
-	{
-		.base = GIC_DIST_SOFTINT,
-		.len = 4,
-		.handle_mmio = handle_mmio_sgi_reg,
-	},
	{
		.base = GIC_DIST_SGI_PENDING_CLEAR,
		.len = VGIC_NR_SGIS,
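The first hunk adds a GIC_DIST_SOFTINT (GICD_SGIR) entry at the head of vgic_dist_ranges[] and the second removes the old entry further down: this is the "Make GICD_SGIR quicker to hit" change from the shortlog. GICD_SGIR is written on every guest IPI, and the range table is matched first-hit, so the hottest register now costs the fewest comparisons. A simplified model of that kind of lookup (hypothetical types, not the kernel's):

#include <stddef.h>
#include <stdint.h>

struct io_range {
	uint32_t base, len;
	void (*handle_mmio)(uint32_t offset);
};

/* First match wins: entries near the head of the table are found after
 * fewer comparisons, so order encodes expected access frequency. */
static const struct io_range *
find_range(const struct io_range *ranges, size_t n, uint32_t offset)
{
	for (size_t i = 0; i < n; i++)
		if (offset - ranges[i].base < ranges[i].len)
			return &ranges[i];
	return NULL;
}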
virt/kvm/arm/vgic-v2.c

@@ -176,6 +176,15 @@ static const struct vgic_ops vgic_v2_ops = {

static struct vgic_params vgic_v2_params;

static void vgic_cpu_init_lrs(void *params)
{
	struct vgic_params *vgic = params;
	int i;

	for (i = 0; i < vgic->nr_lr; i++)
		writel_relaxed(0, vgic->vctrl_base + GICH_LR0 + (i * 4));
}

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
 * @node: pointer to the DT node

@@ -257,6 +266,9 @@ int vgic_v2_probe(struct device_node *vgic_node,
	vgic->type = VGIC_V2;
	vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;

	on_each_cpu(vgic_cpu_init_lrs, vgic, 1);

	*ops = &vgic_v2_ops;
	*params = vgic;
	goto out;
virt/kvm/arm/vgic-v3.c

@@ -42,7 +42,7 @@ static u32 ich_vtr_el2;
static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	struct vgic_lr lr_desc;
-	u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)];
+	u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];

	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
		lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;

@@ -106,7 +106,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
		lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
	}

-	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val;
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val;

	if (!(lr_desc.state & LR_STATE_MASK))
		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);

@@ -216,6 +216,11 @@ static const struct vgic_ops vgic_v3_ops = {

static struct vgic_params vgic_v3_params;

static void vgic_cpu_init_lrs(void *params)
{
	kvm_call_hyp(__vgic_v3_init_lrs);
}

/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
 * @node: pointer to the DT node

@@ -284,6 +289,8 @@ int vgic_v3_probe(struct device_node *vgic_node,
	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
		 vcpu_res.start, vgic->maint_irq);

	on_each_cpu(vgic_cpu_init_lrs, vgic, 1);

	*ops = &vgic_v3_ops;
	*params = vgic;
virt/kvm/async_pf.c

@@ -109,8 +109,8 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
-			list_entry(vcpu->async_pf.queue.next,
-				   typeof(*work), queue);
+			list_first_entry(&vcpu->async_pf.queue,
+					 typeof(*work), queue);
		list_del(&work->queue);

#ifdef CONFIG_KVM_ASYNC_PF_SYNC

@@ -127,8 +127,8 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
	spin_lock(&vcpu->async_pf.lock);
	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
-			list_entry(vcpu->async_pf.done.next,
-				   typeof(*work), link);
+			list_first_entry(&vcpu->async_pf.done,
+					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
virt/kvm/kvm_main.c

@@ -72,11 +72,11 @@ module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

/* Default doubles per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_grow = 2;
-module_param(halt_poll_ns_grow, int, S_IRUGO);
+module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);

/* Default resets per-vcpu halt_poll_ns . */
static unsigned int halt_poll_ns_shrink;
-module_param(halt_poll_ns_shrink, int, S_IRUGO);
+module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);

/*
 * Ordering of locks:

@@ -619,13 +619,10 @@ void *kvm_kvzalloc(unsigned long size)

static void kvm_destroy_devices(struct kvm *kvm)
{
-	struct list_head *node, *tmp;
+	struct kvm_device *dev, *tmp;

-	list_for_each_safe(node, tmp, &kvm->devices) {
-		struct kvm_device *dev =
-			list_entry(node, struct kvm_device, vm_node);
-
-		list_del(node);
+	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
+		list_del(&dev->vm_node);
		dev->ops->destroy(dev);
	}
}
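kvm_destroy_devices() now frees each element while walking the list, which is precisely what the *_safe iteration variants exist for: the next pointer is fetched before the loop body can destroy the current node. A minimal stand-alone illustration of the same pattern (plain C, no kernel list.h):

#include <stdlib.h>

struct node {
	struct node *next;
};

static void destroy_all(struct node *head)
{
	struct node *cur = head, *tmp;

	while (cur) {
		tmp = cur->next; /* saved before cur is freed */
		free(cur);       /* reading cur->next after this is UB */
		cur = tmp;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = head;
		head = n;
	}
	destroy_all(head);
	return 0;
}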
@@ -1436,11 +1433,17 @@ kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

-	if (addr == KVM_HVA_ERR_RO_BAD)
+	if (addr == KVM_HVA_ERR_RO_BAD) {
+		if (writable)
+			*writable = false;
		return KVM_PFN_ERR_RO_FAULT;
+	}

-	if (kvm_is_error_hva(addr))
+	if (kvm_is_error_hva(addr)) {
+		if (writable)
+			*writable = false;
		return KVM_PFN_NOSLOT;
+	}

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
@@ -1942,14 +1945,15 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);

static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
-	int old, val;
+	unsigned int old, val, grow;

	old = val = vcpu->halt_poll_ns;
+	grow = READ_ONCE(halt_poll_ns_grow);
	/* 10us base */
-	if (val == 0 && halt_poll_ns_grow)
+	if (val == 0 && grow)
		val = 10000;
	else
-		val *= halt_poll_ns_grow;
+		val *= grow;

	if (val > halt_poll_ns)
		val = halt_poll_ns;

@@ -1960,13 +1964,14 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)

static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
-	int old, val;
+	unsigned int old, val, shrink;

	old = val = vcpu->halt_poll_ns;
-	if (halt_poll_ns_shrink == 0)
+	shrink = READ_ONCE(halt_poll_ns_shrink);
+	if (shrink == 0)
		val = 0;
	else
-		val /= halt_poll_ns_shrink;
+		val /= shrink;

	vcpu->halt_poll_ns = val;
	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
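With the defaults (grow = 2, shrink = 0) and the halt_poll_ns cap, a vcpu that keeps seeing short halts ramps 0 -> 10000 -> 20000 -> ... up to the cap, while a single long halt drops it straight back to 0 (shrink == 0 means "reset"). A small stand-alone simulation of that policy, mirroring the two functions above (assuming a 500us cap for illustration):

#include <stdio.h>

static unsigned int halt_poll_ns = 500000; /* cap, module parameter */

static unsigned int grow(unsigned int val, unsigned int factor)
{
	val = (val == 0 && factor) ? 10000 : val * factor; /* 10us base */
	return val > halt_poll_ns ? halt_poll_ns : val;
}

static unsigned int shrink(unsigned int val, unsigned int divisor)
{
	return divisor ? val / divisor : 0; /* 0 means "reset to 0" */
}

int main(void)
{
	unsigned int ns = 0;

	for (int i = 0; i < 8; i++) { /* successful polls: ramp up */
		ns = grow(ns, 2);
		printf("%u ", ns);
	}
	printf("\nafter long halt: %u\n", shrink(ns, 0));
	return 0;
}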