perf evsel: Rename perf_evsel__is_*() to evsel__is_*()
As those are 'struct evsel' methods, not part of tools/lib/perf/, aka libperf, to whom the perf_ prefix belongs.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
@@ -216,9 +216,9 @@ void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
|
||||
|
||||
count *= counter->scale;
|
||||
|
||||
if (perf_evsel__is_clock(counter))
|
||||
if (evsel__is_clock(counter))
|
||||
update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
|
||||
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
|
||||
else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
|
||||
update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
|
||||
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
|
||||
update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
|
||||
@@ -241,25 +241,25 @@ void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
|
||||
else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
|
||||
update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
|
||||
ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
|
||||
else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
|
||||
update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
|
||||
ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
|
||||
else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
|
||||
update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
|
||||
ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
|
||||
else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
|
||||
update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
|
||||
else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
|
||||
update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
|
||||
else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
|
||||
update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
|
||||
else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
|
||||
update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
|
||||
else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
|
||||
update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
|
||||
else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
|
||||
update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
|
||||
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
|
||||
else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
|
||||
update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
|
||||
else if (perf_stat_evsel__is(counter, SMI_NUM))
|
||||
update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
|
||||
@@ -833,7 +833,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
|
||||
struct metric_event *me;
|
||||
int num = 1;
|
||||
|
||||
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
|
||||
if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
|
||||
total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
|
||||
|
||||
if (total) {
|
||||
@@ -858,7 +858,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
|
||||
"stalled cycles per insn",
|
||||
ratio);
|
||||
}
|
||||
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
|
||||
} else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
|
||||
if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
|
||||
print_branch_misses(config, cpu, evsel, avg, out, st);
|
||||
else
|
||||
@@ -913,7 +913,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
|
||||
print_ll_cache_misses(config, cpu, evsel, avg, out, st);
|
||||
else
|
||||
print_metric(config, ctxp, NULL, NULL, "of all LL-cache hits", 0);
|
||||
} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
|
||||
} else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
|
||||
total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
|
||||
|
||||
if (total)
|
||||
@@ -924,11 +924,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
|
||||
"of all cache refs", ratio);
|
||||
else
|
||||
print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
|
||||
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
|
||||
} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
|
||||
print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
|
||||
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
|
||||
} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
|
||||
print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
|
||||
} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
|
||||
} else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
|
||||
total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
|
||||
|
||||
if (total) {
|
||||
@@ -979,7 +979,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
|
||||
ratio = total / avg;
|
||||
|
||||
print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
|
||||
} else if (perf_evsel__is_clock(evsel)) {
|
||||
} else if (evsel__is_clock(evsel)) {
|
||||
if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
|
||||
print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
|
||||
avg / (ratio * evsel->scale));
|
||||
|
Reference in New Issue
Block a user