perf: Rework the PMU methods
Replace pmu::{enable,disable,start,stop,unthrottle} with
pmu::{add,del,start,stop}, all of which take a flags argument.

The new interface extends the capability to stop a counter while
keeping it scheduled on the PMU. We replace the throttled state with
the generic stopped state.

This also allows us to efficiently stop/start counters over certain
code paths (like IRQ handlers).

It also allows scheduling a counter without it starting, allowing for
a generic frozen state (useful for rotating stopped counters).

The stopped state is implemented in two different ways, depending on
how the architecture implemented the throttled state:

 1) We disable the counter:
    a) the pmu has per-counter enable bits, we flip that
    b) we program a NOP event, preserving the counter state

 2) We store the counter state and ignore all read/overflow events

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit a4eaf7f146
parent fa407f35e0
committed by Ingo Molnar
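For illustration, here is a minimal sketch of the ->start()/->stop() protocol the new interface expects. It is not part of the patch: the my_pmu_*() helpers are hypothetical stand-ins for the architecture-specific counter programming, while the PERF_HES_* and PERF_EF_* flags are the ones introduced by this change.

/*
 * Illustrative sketch only -- not part of the patch.  A hypothetical
 * architecture driver implementing the new callbacks; the my_pmu_*()
 * helpers are made-up stand-ins for the real register programming.
 */
#include <linux/kernel.h>
#include <linux/perf_event.h>

void my_pmu_disable_counter(int idx);			/* hypothetical */
void my_pmu_enable_counter(int idx);			/* hypothetical */
void my_pmu_set_period(struct perf_event *event);	/* hypothetical */
void my_pmu_update_count(struct perf_event *event);	/* hypothetical */

static void my_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* Keep the counter scheduled, just stop it counting. */
		my_pmu_disable_counter(hwc->idx);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/* Fold the remaining hardware delta into the event count. */
		my_pmu_update_count(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void my_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		/* Only reprogram the period from an up-to-date count. */
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		my_pmu_set_period(event);
	}

	hwc->state = 0;
	my_pmu_enable_counter(hwc->idx);
}

Under this protocol, ->add() schedules the counter in the PERF_HES_STOPPED | PERF_HES_UPTODATE state and, when PERF_EF_START is not passed, leaves it frozen (the x86 code marks it PERF_HES_ARCH) until an explicit ->start(); ->del() stops it with PERF_EF_UPDATE so the final count is drained, as the x86 conversion below shows.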
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -583,7 +583,7 @@ static void x86_pmu_disable_all(void)
 	}
 }
 
-static void x86_pmu_pmu_disable(struct pmu *pmu)
+static void x86_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -800,10 +800,10 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
 		hwc->last_tag == cpuc->tags[i];
 }
 
-static int x86_pmu_start(struct perf_event *event);
-static void x86_pmu_stop(struct perf_event *event);
+static void x86_pmu_start(struct perf_event *event, int flags);
+static void x86_pmu_stop(struct perf_event *event, int flags);
 
-static void x86_pmu_pmu_enable(struct pmu *pmu)
+static void x86_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *event;
@@ -839,7 +839,14 @@ static void x86_pmu_pmu_enable(struct pmu *pmu)
 		    match_prev_assignment(hwc, cpuc, i))
 			continue;
 
-		x86_pmu_stop(event);
+		/*
+		 * Ensure we don't accidentally enable a stopped
+		 * counter simply because we rescheduled.
+		 */
+		if (hwc->state & PERF_HES_STOPPED)
+			hwc->state |= PERF_HES_ARCH;
+
+		x86_pmu_stop(event, PERF_EF_UPDATE);
 	}
 
 	for (i = 0; i < cpuc->n_events; i++) {
@@ -851,7 +858,10 @@ static void x86_pmu_pmu_enable(struct pmu *pmu)
 		else if (i < n_running)
 			continue;
 
-		x86_pmu_start(event);
+		if (hwc->state & PERF_HES_ARCH)
+			continue;
+
+		x86_pmu_start(event, PERF_EF_RELOAD);
 	}
 	cpuc->n_added = 0;
 	perf_events_lapic_init();
@@ -952,15 +962,12 @@ static void x86_pmu_enable_event(struct perf_event *event)
 }
 
 /*
- * activate a single event
+ * Add a single event to the PMU.
  *
  * The event is added to the group of enabled events
  * but only if it can be scehduled with existing events.
- *
- * Called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
  */
-static int x86_pmu_enable(struct perf_event *event)
+static int x86_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc;
@@ -975,10 +982,14 @@ static int x86_pmu_enable(struct perf_event *event)
 	if (ret < 0)
 		goto out;
 
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_ARCH;
+
 	/*
 	 * If group events scheduling transaction was started,
 	 * skip the schedulability test here, it will be peformed
-	 * at commit time(->commit_txn) as a whole
+	 * at commit time (->commit_txn) as a whole
 	 */
 	if (cpuc->group_flag & PERF_EVENT_TXN)
 		goto done_collect;
@@ -1003,27 +1014,28 @@ out:
 	return ret;
 }
 
-static int x86_pmu_start(struct perf_event *event)
+static void x86_pmu_start(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx = event->hw.idx;
 
-	if (idx == -1)
-		return -EAGAIN;
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+		return;
+
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		x86_perf_event_set_period(event);
+	}
+
+	event->hw.state = 0;
 
-	x86_perf_event_set_period(event);
 	cpuc->events[idx] = event;
 	__set_bit(idx, cpuc->active_mask);
 	x86_pmu.enable(event);
 	perf_event_update_userpage(event);
-
-	return 0;
-}
-
-static void x86_pmu_unthrottle(struct perf_event *event)
-{
-	int ret = x86_pmu_start(event);
-	WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1080,27 +1092,29 @@ void perf_event_print_debug(void)
 	local_irq_restore(flags);
 }
 
-static void x86_pmu_stop(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
 
-	if (!__test_and_clear_bit(idx, cpuc->active_mask))
-		return;
-
-	x86_pmu.disable(event);
-
-	/*
-	 * Drain the remaining delta count out of a event
-	 * that we are disabling:
-	 */
-	x86_perf_event_update(event);
+	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+		x86_pmu.disable(event);
+		cpuc->events[hwc->idx] = NULL;
+		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+		hwc->state |= PERF_HES_STOPPED;
+	}
 
-	cpuc->events[idx] = NULL;
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		/*
+		 * Drain the remaining delta count out of a event
+		 * that we are disabling:
+		 */
+		x86_perf_event_update(event);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
 }
 
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int i;
@@ -1113,7 +1127,7 @@ static void x86_pmu_disable(struct perf_event *event)
 	if (cpuc->group_flag & PERF_EVENT_TXN)
 		return;
 
-	x86_pmu_stop(event);
+	x86_pmu_stop(event, PERF_EF_UPDATE);
 
 	for (i = 0; i < cpuc->n_events; i++) {
 		if (event == cpuc->event_list[i]) {
@@ -1165,7 +1179,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu_stop(event);
+			x86_pmu_stop(event, 0);
 	}
 
 	if (handled)
@@ -1605,15 +1619,17 @@ int x86_pmu_event_init(struct perf_event *event)
 }
 
 static struct pmu pmu = {
-	.pmu_enable	= x86_pmu_pmu_enable,
-	.pmu_disable	= x86_pmu_pmu_disable,
+	.pmu_enable	= x86_pmu_enable,
+	.pmu_disable	= x86_pmu_disable,
+
 	.event_init	= x86_pmu_event_init,
-	.enable		= x86_pmu_enable,
-	.disable	= x86_pmu_disable,
+
+	.add		= x86_pmu_add,
+	.del		= x86_pmu_del,
 	.start		= x86_pmu_start,
 	.stop		= x86_pmu_stop,
 	.read		= x86_pmu_read,
-	.unthrottle	= x86_pmu_unthrottle,
+
 	.start_txn	= x86_pmu_start_txn,
 	.cancel_txn	= x86_pmu_cancel_txn,
 	.commit_txn	= x86_pmu_commit_txn,