arm64: KVM: Enable VHE support for :G/:H perf event modifiers

With VHE, different exception levels are used between the host (EL2) and
the guest (EL1), with a shared exception level for userspace (EL0). We
can take advantage of this and use the PMU's exception-level filtering
to avoid enabling/disabling counters in the world-switch code. Instead
we just modify the counter type to include or exclude EL0 at
vcpu_{load,put} time.
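
As a sketch of that wiring (kvm_arch_vcpu_load/put are the real KVM
entry points; the exact placement of these calls is an assumption, as
those hunks are not shown below):

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* ... existing load work ... */

	/* bias the EL0 filters towards the guest's events */
	kvm_vcpu_pmu_restore_guest(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	/* ... existing put work ... */

	/* hand the EL0 filters back to the host's events */
	kvm_vcpu_pmu_restore_host(vcpu);
}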

We also ensure that trapped PMU system register writes do not re-enable
EL0 when reconfiguring the backing perf events.
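
One way to keep those filters consistent is to re-run the restore
helper after the backing event has been recreated; a sketch, assuming
a sys_regs.c-style trap handler (the handler body around the call is
illustrative):

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	/* ... decode the counter and update the backing perf event ... */

	/*
	 * Recreating the perf event may have reset the EL0 filter;
	 * re-apply the guest's view of it.
	 */
	kvm_vcpu_pmu_restore_guest(vcpu);

	return true;
}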

This approach completely avoids blackout windows seen with !VHE.

Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Author:    Andrew Murray
Date:      2019-04-09 20:22:15 +01:00
Committer: Marc Zyngier
Parent:    3d91befbb3
Commit:    435e53fb5e
6 changed files with 103 additions and 4 deletions

@@ -8,11 +8,19 @@
 #include <asm/kvm_hyp.h>
 
 /*
- * Given the exclude_{host,guest} attributes, determine if we are going
- * to need to switch counters at guest entry/exit.
+ * Given the perf event attributes and system type, determine
+ * if we are going to need to switch counters at guest entry/exit.
  */
 static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
 {
+	/*
+	 * With VHE the guest kernel runs at EL1 and the host at EL2,
+	 * so if user (EL0) is excluded then we have no reason to switch
+	 * counters.
+	 */
+	if (has_vhe() && attr->exclude_user)
+		return false;
+
 	/* Only switch if attributes are different */
 	return (attr->exclude_host != attr->exclude_guest);
 }
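
To see why the early-out is safe, consider a host-only (:H) cycles
event with and without user counting (illustration only, not part of
the patch):

struct perf_event_attr attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.exclude_guest	= 1,	/* cycles:H - host only */
};

/*
 * kvm_pmu_switch_needed(&attr) is true: EL0 is shared, so the
 * counter must still be toggled at guest entry/exit.
 */

attr.exclude_user = 1;		/* additionally exclude EL0 */

/*
 * On VHE, kvm_pmu_switch_needed(&attr) is now false: with EL0
 * excluded, the EL filters alone (host at EL2, guest at EL1)
 * keep guest cycles out of the count.
 */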
@@ -82,3 +90,79 @@ void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
 	if (pmu->events_host)
 		write_sysreg(pmu->events_host, pmcntenset_el0);
 }
+
+/*
+ * Modify ARMv8 PMU events to include EL0 counting
+ */
+static void kvm_vcpu_pmu_enable_el0(unsigned long events)
+{
+	u64 typer;
+	u32 counter;
+
+	for_each_set_bit(counter, &events, 32) {
+		write_sysreg(counter, pmselr_el0);
+		isb();
+		typer = read_sysreg(pmxevtyper_el0) & ~ARMV8_PMU_EXCLUDE_EL0;
+		write_sysreg(typer, pmxevtyper_el0);
+		isb();
+	}
+}
+
+/*
+ * Modify ARMv8 PMU events to exclude EL0 counting
+ */
+static void kvm_vcpu_pmu_disable_el0(unsigned long events)
+{
+	u64 typer;
+	u32 counter;
+
+	for_each_set_bit(counter, &events, 32) {
+		write_sysreg(counter, pmselr_el0);
+		isb();
+		typer = read_sysreg(pmxevtyper_el0) | ARMV8_PMU_EXCLUDE_EL0;
+		write_sysreg(typer, pmxevtyper_el0);
+		isb();
+	}
+}
+
+/*
+ * On VHE ensure that only guest events have EL0 counting enabled
+ */
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_host_data *host;
+	u32 events_guest, events_host;
+
+	if (!has_vhe())
+		return;
+
+	host_ctxt = vcpu->arch.host_cpu_context;
+	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+	events_guest = host->pmu_events.events_guest;
+	events_host = host->pmu_events.events_host;
+
+	kvm_vcpu_pmu_enable_el0(events_guest);
+	kvm_vcpu_pmu_disable_el0(events_host);
+}
+
+/*
+ * On VHE ensure that only host events have EL0 counting enabled
+ */
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_host_data *host;
+	u32 events_guest, events_host;
+
+	if (!has_vhe())
+		return;
+
+	host_ctxt = vcpu->arch.host_cpu_context;
+	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+	events_guest = host->pmu_events.events_guest;
+	events_host = host->pmu_events.events_host;
+
+	kvm_vcpu_pmu_enable_el0(events_host);
+	kvm_vcpu_pmu_disable_el0(events_guest);
+}
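
Taken together, a worked illustration of these helpers (the counter
assignments are hypothetical):

/*
 * Suppose the host owns counter 0 and the guest's :G events own
 * counter 1:
 *
 *	host->pmu_events.events_host  = 0x1;	(bit 0: counter 0)
 *	host->pmu_events.events_guest = 0x2;	(bit 1: counter 1)
 */
kvm_vcpu_pmu_restore_guest(vcpu);	/* vcpu_load: counter 1 counts EL0, counter 0 stops */
kvm_vcpu_pmu_restore_host(vcpu);	/* vcpu_put: the EL0 filters flip back */

/*
 * Both counters remain set in PMCNTENSET_EL0 throughout; only the
 * EL0 filter moves, which is how the blackout window is avoided.
 */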