Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "ARM:
   - support for chained PMU counters in guests
   - improved SError handling
   - handle Neoverse N1 erratum #1349291
   - allow side-channel mitigation status to be migrated
   - standardise most AArch64 system register accesses to msr_s/mrs_s
   - fix host MPIDR corruption on 32bit
   - selftests cleanups

  x86:
   - PMU event {white,black}listing
   - ability for the guest to disable host-side interrupt polling
   - fixes for enlightened VMCS (Hyper-V pv nested virtualization)
   - new hypercall to yield to IPI target
   - support for passing cstate MSRs through to the guest
   - lots of cleanups and optimizations

  Generic:
   - Some txt->rST conversions for the documentation"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (128 commits)
  Documentation: virtual: Add toctree hooks
  Documentation: kvm: Convert cpuid.txt to .rst
  Documentation: virtual: Convert paravirt_ops.txt to .rst
  KVM: x86: Unconditionally enable irqs in guest context
  KVM: x86: PMU Event Filter
  kvm: x86: Fix -Wmissing-prototypes warnings
  KVM: Properly check if "page" is valid in kvm_vcpu_unmap
  KVM: arm/arm64: Initialise host's MPIDRs by reading the actual register
  KVM: LAPIC: Retry tune per-vCPU timer_advance_ns if adaptive tuning goes insane
  kvm: LAPIC: write down valid APIC registers
  KVM: arm64: Migrate _elx sysreg accessors to msr_s/mrs_s
  KVM: doc: Add API documentation on the KVM_REG_ARM_WORKAROUNDS register
  KVM: arm/arm64: Add save/restore support for firmware workaround state
  arm64: KVM: Propagate full Spectre v2 workaround state to KVM guests
  KVM: arm/arm64: Support chained PMU counters
  KVM: arm/arm64: Remove pmc->bitmask
  KVM: arm/arm64: Re-create event when setting counter value
  KVM: arm/arm64: Extract duplicated code to own function
  KVM: arm/arm64: Rename kvm_pmu_{enable/disable}_counter functions
  KVM: LAPIC: ARBPRI is a reserved register for x2APIC
  ...
@@ -19,6 +19,9 @@
 #include "lapic.h"
 #include "pmu.h"
 
+/* This keeps the total size of the filter under 4k. */
+#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 63
+
 /* NOTE:
  * - Each perf counter is defined as "struct kvm_pmc";
  * - There are two types of perf counters: general purpose (gp) and fixed.
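
As an aside (not part of the commit), a minimal compile-time sketch of the size bound that comment alludes to; the two-field header below is a simplified stand-in for the real uapi struct, which this hunk does not show:

#include <stdint.h>

/* Simplified stand-in for the filter header (action + nevents); the real
 * uapi layout is an assumption here. 63 eight-byte event entries plus a
 * small header stay comfortably inside one 4 KiB page. */
struct pmu_event_filter_hdr {
        uint32_t action;
        uint32_t nevents;
};

_Static_assert(sizeof(struct pmu_event_filter_hdr) + 63 * sizeof(uint64_t) <= 4096,
               "PMU event filter must fit in 4k");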
@@ -141,6 +144,10 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
 	unsigned config, type = PERF_TYPE_RAW;
 	u8 event_select, unit_mask;
+	struct kvm *kvm = pmc->vcpu->kvm;
+	struct kvm_pmu_event_filter *filter;
+	int i;
+	bool allow_event = true;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
 		printk_once("kvm pmu: pin control bit is ignored\n");
@@ -152,6 +159,22 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
 		return;
 
+	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+	if (filter) {
+		for (i = 0; i < filter->nevents; i++)
+			if (filter->events[i] ==
+			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
+				break;
+		if (filter->action == KVM_PMU_EVENT_ALLOW &&
+		    i == filter->nevents)
+			allow_event = false;
+		if (filter->action == KVM_PMU_EVENT_DENY &&
+		    i < filter->nevents)
+			allow_event = false;
+	}
+	if (!allow_event)
+		return;
+
 	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
 	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 
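
To make the allow/deny rule in the hunk above explicit: with an ALLOW filter the event is programmed only if its select/umask encoding matches some entry, with a DENY filter only if it matches none. The following is an illustrative standalone restatement, not code from the commit; the action flag and event mask are passed in rather than assumed:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative restatement of the matching logic above. 'allow_action' is
 * true for an ALLOW-type filter, false for DENY; 'event_mask' stands in for
 * AMD64_RAW_EVENT_MASK_NB (event select + unit mask bits). */
static bool pmu_event_is_allowed(const uint64_t *events, uint32_t nevents,
                                 bool allow_action, uint64_t event_mask,
                                 uint64_t eventsel)
{
        uint64_t key = eventsel & event_mask;
        bool match = false;
        uint32_t i;

        for (i = 0; i < nevents; i++) {
                if (events[i] == key) {
                        match = true;
                        break;
                }
        }

        /* ALLOW list: must match an entry; DENY list: must match no entry. */
        return allow_action ? match : !match;
}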
@@ -348,3 +371,43 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 {
 	kvm_pmu_reset(vcpu);
 }
+
+int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
+{
+	struct kvm_pmu_event_filter tmp, *filter;
+	size_t size;
+	int r;
+
+	if (copy_from_user(&tmp, argp, sizeof(tmp)))
+		return -EFAULT;
+
+	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
+	    tmp.action != KVM_PMU_EVENT_DENY)
+		return -EINVAL;
+
+	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
+		return -E2BIG;
+
+	size = struct_size(filter, events, tmp.nevents);
+	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
+	if (!filter)
+		return -ENOMEM;
+
+	r = -EFAULT;
+	if (copy_from_user(filter, argp, size))
+		goto cleanup;
+
+	/* Ensure nevents can't be changed between the user copies. */
+	*filter = tmp;
+
+	mutex_lock(&kvm->lock);
+	rcu_swap_protected(kvm->arch.pmu_event_filter, filter,
+			   mutex_is_locked(&kvm->lock));
+	mutex_unlock(&kvm->lock);
+
+	synchronize_srcu_expedited(&kvm->srcu);
+	r = 0;
+cleanup:
+	kfree(filter);
+	return r;
+}
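
For context, a minimal userspace sketch of how a VMM might install such a filter through the VM ioctl; the ioctl name KVM_SET_PMU_EVENT_FILTER and the exact uapi struct layout come from the wider series' headers rather than this hunk, so treat them as assumptions:

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Allow only the listed raw events (event select + unit mask encodings);
 * 'vm_fd' is an already-created KVM VM file descriptor. Error handling is
 * trimmed for brevity. */
static int set_pmu_allow_list(int vm_fd, const __u64 *events, __u32 nevents)
{
        struct kvm_pmu_event_filter *filter;
        size_t size = sizeof(*filter) + nevents * sizeof(events[0]);
        int ret;

        filter = calloc(1, size);
        if (!filter)
                return -1;

        filter->action = KVM_PMU_EVENT_ALLOW;
        filter->nevents = nevents;
        memcpy(filter->events, events, nevents * sizeof(events[0]));

        ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter);
        free(filter);
        return ret;
}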