Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "Mostly tooling and PMU driver fixes, but also a number of late
  updates such as the reworking of the call-chain size limiting logic
  to make call-graph recording more robust, plus tooling side changes
  for the new 'backwards ring-buffer' extension to the perf ring-buffer"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
  perf record: Read from backward ring buffer
  perf record: Rename variable to make code clear
  perf record: Prevent reading invalid data in record__mmap_read
  perf evlist: Add API to pause/resume
  perf trace: Use the ptr->name beautifier as default for "filename" args
  perf trace: Use the fd->name beautifier as default for "fd" args
  perf report: Add srcline_from/to branch sort keys
  perf evsel: Record fd into perf_mmap
  perf evsel: Add overwrite attribute and check write_backward
  perf tools: Set buildid dir under symfs when --symfs is provided
  perf trace: Only auto set call-graph to "dwarf" when syscalls are being traced
  perf annotate: Sort list of recognised instructions
  perf annotate: Fix identification of ARM blt and bls instructions
  perf tools: Fix usage of max_stack sysctl
  perf callchain: Stop validating callchains by the max_stack sysctl
  perf trace: Fix exit_group() formatting
  perf top: Use machine->kptr_restrict_warned
  perf trace: Warn when trying to resolve kernel addresses with kptr_restrict=1
  perf machine: Do not bail out if not managing to read ref reloc symbol
  perf/x86/intel/p4: Trival indentation fix, remove space
  ...
@@ -354,9 +354,6 @@ static struct ins_ops nop_ops = {
 	.scnprintf = nop__scnprintf,
 };
 
-/*
- * Must be sorted by name!
- */
 static struct ins instructions[] = {
 	{ .name = "add",	.ops  = &mov_ops, },
 	{ .name = "addl",	.ops  = &mov_ops, },
@@ -372,8 +369,8 @@ static struct ins instructions[] = {
 	{ .name = "bgt",	.ops  = &jump_ops, },
 	{ .name = "bhi",	.ops  = &jump_ops, },
 	{ .name = "bl",		.ops  = &call_ops, },
-	{ .name = "blt",	.ops  = &jump_ops, },
 	{ .name = "bls",	.ops  = &jump_ops, },
+	{ .name = "blt",	.ops  = &jump_ops, },
 	{ .name = "blx",	.ops  = &call_ops, },
 	{ .name = "bne",	.ops  = &jump_ops, },
 #endif
@@ -449,18 +446,39 @@ static struct ins instructions[] = {
 	{ .name = "xbeginq", .ops  = &jump_ops, },
 };
 
-static int ins__cmp(const void *name, const void *insp)
+static int ins__key_cmp(const void *name, const void *insp)
 {
 	const struct ins *ins = insp;
 
 	return strcmp(name, ins->name);
 }
 
-static struct ins *ins__find(const char *name)
+static int ins__cmp(const void *a, const void *b)
+{
+	const struct ins *ia = a;
+	const struct ins *ib = b;
+
+	return strcmp(ia->name, ib->name);
+}
+
+static void ins__sort(void)
+{
+	const int nmemb = ARRAY_SIZE(instructions);
+
+	qsort(instructions, nmemb, sizeof(struct ins), ins__cmp);
+}
+
+static struct ins *ins__find(const char *name)
 {
 	const int nmemb = ARRAY_SIZE(instructions);
+	static bool sorted;
+
+	if (!sorted) {
+		ins__sort();
+		sorted = true;
+	}
 
-	return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
+	return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__key_cmp);
 }
 
 int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
@@ -1122,7 +1140,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
 	} else if (dso__is_kcore(dso)) {
 		goto fallback;
 	} else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
-		   strstr(command, "[kernel.kallsyms]") ||
+		   strstr(command, DSO__NAME_KALLSYMS) ||
		   access(symfs_filename, R_OK)) {
 		free(filename);
 fallback:
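The hunks above replace the hand-maintained "must be sorted" instruction table with sort-on-first-lookup. A minimal standalone sketch of the same pattern, with a hypothetical table and names rather than the perf source:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ins { const char *name; };

/* Deliberately unsorted: build-time ordering no longer matters. */
static struct ins instructions[] = {
	{ "jmp" }, { "add" }, { "mov" }, { "call" },
};

#define NMEMB (sizeof(instructions) / sizeof(instructions[0]))

/* qsort comparator: both arguments are table entries. */
static int ins__cmp(const void *a, const void *b)
{
	return strcmp(((const struct ins *)a)->name,
		      ((const struct ins *)b)->name);
}

/* bsearch comparator: the key is a bare string, not an entry. */
static int ins__key_cmp(const void *name, const void *insp)
{
	return strcmp(name, ((const struct ins *)insp)->name);
}

static struct ins *ins__find(const char *name)
{
	static int sorted;

	if (!sorted) {		/* sort lazily, on the first lookup */
		qsort(instructions, NMEMB, sizeof(struct ins), ins__cmp);
		sorted = 1;
	}
	return bsearch(name, instructions, NMEMB, sizeof(struct ins),
		       ins__key_cmp);
}

int main(void)
{
	printf("found: %s\n", ins__find("mov") ? "mov" : "(none)");
	return 0;
}

The rename in the diff matters: qsort()'s comparator receives two array elements, while bsearch()'s receives the search key and an element, so keeping ins__cmp and ins__key_cmp distinct guards against a subtle mismatch.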
@@ -256,7 +256,7 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
 		size_t name_len;
 		bool in_kernel = false;
 
-		if (!pos->hit)
+		if (!pos->hit && !dso__is_vdso(pos))
 			continue;
 
 		if (dso__is_vdso(pos)) {
@@ -298,8 +298,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
 	 */
 	callchain_param.order = ORDER_CALLER;
 	err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
-					sample, NULL, NULL,
-					sysctl_perf_event_max_stack);
+					sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
 	if (err) {
 		callchain_param.order = saved_order;
 		return NULL;
@@ -7,6 +7,7 @@
 #include "auxtrace.h"
 #include "util.h"
 #include "debug.h"
+#include "vdso.h"
 
 char dso__symtab_origin(const struct dso *dso)
 {
@@ -62,9 +63,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
 		}
 		break;
 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
-		/* skip the locally configured cache if a symfs is given */
-		if (symbol_conf.symfs[0] ||
-		    (dso__build_id_filename(dso, filename, size) == NULL))
+		if (dso__build_id_filename(dso, filename, size) == NULL)
 			ret = -1;
 		break;
 
@@ -1169,7 +1168,7 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
 	struct dso *pos;
 
 	list_for_each_entry(pos, head, node) {
-		if (with_hits && !pos->hit)
+		if (with_hits && !pos->hit && !dso__is_vdso(pos))
 			continue;
 		if (pos->has_build_id) {
 			have_build_id = true;
@@ -44,6 +44,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
 	perf_evlist__set_maps(evlist, cpus, threads);
 	fdarray__init(&evlist->pollfd, 64);
 	evlist->workload.pid = -1;
+	evlist->backward = false;
 }
 
 struct perf_evlist *perf_evlist__new(void)
@@ -679,6 +680,33 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 	return NULL;
 }
 
+static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
+{
+	int i;
+
+	for (i = 0; i < evlist->nr_mmaps; i++) {
+		int fd = evlist->mmap[i].fd;
+		int err;
+
+		if (fd < 0)
+			continue;
+		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+int perf_evlist__pause(struct perf_evlist *evlist)
+{
+	return perf_evlist__set_paused(evlist, true);
+}
+
+int perf_evlist__resume(struct perf_evlist *evlist)
+{
+	return perf_evlist__set_paused(evlist, false);
+}
+
 /* When check_messup is true, 'end' must points to a good entry */
 static union perf_event *
 perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
@@ -881,6 +909,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 	if (evlist->mmap[idx].base != NULL) {
 		munmap(evlist->mmap[idx].base, evlist->mmap_len);
 		evlist->mmap[idx].base = NULL;
+		evlist->mmap[idx].fd = -1;
 		atomic_set(&evlist->mmap[idx].refcnt, 0);
 	}
 	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
@@ -901,10 +930,14 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
 
 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
+	int i;
+
 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 	if (cpu_map__empty(evlist->cpus))
 		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+	for (i = 0; i < evlist->nr_mmaps; i++)
+		evlist->mmap[i].fd = -1;
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
@@ -941,6 +974,7 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 		evlist->mmap[idx].base = NULL;
 		return -1;
 	}
+	evlist->mmap[idx].fd = fd;
 
 	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
 				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
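The new pause/resume API exists so a reader can freeze a backward (overwrite) ring buffer while draining it: PERF_EVENT_IOC_PAUSE_OUTPUT stops the kernel from writing, giving the reader a stable snapshot. A hedged sketch of how a caller inside the perf tool tree might bracket a read with it, assuming the evlist mmap helpers of that era (perf_evlist__mmap_read_catchup(), perf_evlist__mmap_read(), perf_evlist__mmap_consume()); error handling and record processing are elided:

/* Sketch only, not code from the tree. */
static int drain_backward_rb(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i;

	if (perf_evlist__pause(evlist))		/* freeze all ring buffers */
		return -1;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		perf_evlist__mmap_read_catchup(evlist, i);
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* ... deliver the event ... */
			perf_evlist__mmap_consume(evlist, i);
		}
	}

	return perf_evlist__resume(evlist);	/* let the kernel write again */
}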
@@ -28,6 +28,7 @@ struct record_opts;
 struct perf_mmap {
 	void		 *base;
 	int		 mask;
+	int		 fd;
 	atomic_t	 refcnt;
 	u64		 prev;
 	struct auxtrace_mmap auxtrace_mmap;
@@ -43,6 +44,7 @@ struct perf_evlist {
 	bool		 overwrite;
 	bool		 enabled;
 	bool		 has_user_cpus;
+	bool		 backward;
 	size_t		 mmap_len;
 	int		 id_pos;
 	int		 is_pos;
@@ -135,6 +137,8 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
 
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
+int perf_evlist__pause(struct perf_evlist *evlist);
+int perf_evlist__resume(struct perf_evlist *evlist);
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
 
@@ -37,6 +37,7 @@ static struct {
 	bool clockid;
 	bool clockid_wrong;
 	bool lbr_flags;
+	bool write_backward;
 } perf_missing_features;
 
 static clockid_t clockid;
@@ -1376,6 +1377,8 @@ fallback_missing_features:
 	if (perf_missing_features.lbr_flags)
 		evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
 				     PERF_SAMPLE_BRANCH_NO_CYCLES);
+	if (perf_missing_features.write_backward)
+		evsel->attr.write_backward = false;
 retry_sample_id:
 	if (perf_missing_features.sample_id_all)
 		evsel->attr.sample_id_all = 0;
@@ -1438,6 +1441,12 @@ retry_open:
 				err = -EINVAL;
 				goto out_close;
 			}
+
+			if (evsel->overwrite &&
+			    perf_missing_features.write_backward) {
+				err = -EINVAL;
+				goto out_close;
+			}
 		}
 	}
 
@@ -1500,6 +1509,10 @@ try_fallback:
 			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
 		perf_missing_features.lbr_flags = true;
 		goto fallback_missing_features;
+	} else if (!perf_missing_features.write_backward &&
+		   evsel->attr.write_backward) {
+		perf_missing_features.write_backward = true;
+		goto fallback_missing_features;
 	}
 
 out_close:
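The write_backward plumbing follows evsel's usual probe-and-fallback protocol: try to open the event with the new attribute bit set, and if the kernel rejects it, record the feature as missing and retry without it. A standalone sketch of that control flow with a stubbed-out open (hypothetical names, not the perf code):

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

struct attr { bool write_backward; };

static struct { bool write_backward; } missing_features;

/* Stand-in for sys_perf_event_open(): pretend the kernel is too old
 * and rejects the write_backward bit. */
static int try_open(const struct attr *attr)
{
	if (attr->write_backward) {
		errno = EINVAL;
		return -1;
	}
	return 3;	/* fake fd */
}

static int evsel_open(struct attr *attr)
{
fallback_missing_features:
	if (missing_features.write_backward)
		attr->write_backward = false;

	int fd = try_open(attr);
	if (fd >= 0)
		return fd;

	/* Probe failed: blame the newest feature still enabled, retry. */
	if (!missing_features.write_backward && attr->write_backward) {
		missing_features.write_backward = true;
		goto fallback_missing_features;
	}
	return -1;
}

int main(void)
{
	struct attr attr = { .write_backward = true };
	printf("fd=%d, write_backward missing=%d\n",
	       evsel_open(&attr), missing_features.write_backward);
	return 0;
}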
@@ -112,6 +112,7 @@ struct perf_evsel {
 	bool			tracking;
 	bool			per_pkg;
 	bool			precise_max;
+	bool			overwrite;
 	/* parse modifier helper */
 	int			exclude_GH;
 	int			nr_members;
@@ -117,6 +117,13 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
 			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
 		}
+
+		if (h->branch_info->srcline_from)
+			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
+					strlen(h->branch_info->srcline_from));
+		if (h->branch_info->srcline_to)
+			hists__new_col_len(hists, HISTC_SRCLINE_TO,
+					strlen(h->branch_info->srcline_to));
 	}
 
 	if (h->mem_info) {
@@ -1042,6 +1049,8 @@ void hist_entry__delete(struct hist_entry *he)
 	if (he->branch_info) {
 		map__zput(he->branch_info->from.map);
 		map__zput(he->branch_info->to.map);
+		free_srcline(he->branch_info->srcline_from);
+		free_srcline(he->branch_info->srcline_to);
 		zfree(&he->branch_info);
 	}
 
@@ -52,6 +52,8 @@ enum hist_column {
 	HISTC_MEM_IADDR_SYMBOL,
 	HISTC_TRANSACTION,
 	HISTC_CYCLES,
+	HISTC_SRCLINE_FROM,
+	HISTC_SRCLINE_TO,
 	HISTC_TRACE,
 	HISTC_NR_COLS, /* Last entry */
 };
@@ -43,6 +43,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 
 	machine->symbol_filter = NULL;
 	machine->id_hdr_size = 0;
+	machine->kptr_restrict_warned = false;
 	machine->comm_exec = false;
 	machine->kernel_start = 0;
 
@@ -709,7 +710,7 @@ static struct dso *machine__get_kernel(struct machine *machine)
 	if (machine__is_host(machine)) {
 		vmlinux_name = symbol_conf.vmlinux_name;
 		if (!vmlinux_name)
-			vmlinux_name = "[kernel.kallsyms]";
+			vmlinux_name = DSO__NAME_KALLSYMS;
 
 		kernel = machine__findnew_kernel(machine, vmlinux_name,
 						 "[kernel]", DSO_TYPE_KERNEL);
@@ -1135,10 +1136,10 @@ int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
 	const char *name;
-	u64 addr = machine__get_running_kernel_start(machine, &name);
+	u64 addr;
 	int ret;
 
-	if (!addr || kernel == NULL)
+	if (kernel == NULL)
 		return -1;
 
 	ret = __machine__create_kernel_maps(machine, kernel);
@@ -1160,8 +1161,9 @@ int machine__create_kernel_maps(struct machine *machine)
 	 */
 	map_groups__fixup_end(&machine->kmaps);
 
-	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
-					     addr)) {
+	addr = machine__get_running_kernel_start(machine, &name);
+	if (!addr) {
+	} else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
 		machine__destroy_kernel_maps(machine);
 		return -1;
 	}
@@ -1769,11 +1771,6 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 		 */
 		int mix_chain_nr = i + 1 + lbr_nr + 1;
 
-		if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) {
-			pr_warning("corrupted callchain. skipping...\n");
-			return 0;
-		}
-
 		for (j = 0; j < mix_chain_nr; j++) {
 			if (callchain_param.order == ORDER_CALLEE) {
 				if (j < i + 1)
@@ -1811,9 +1808,9 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 {
 	struct branch_stack *branch = sample->branch_stack;
 	struct ip_callchain *chain = sample->callchain;
-	int chain_nr = min(max_stack, (int)chain->nr);
+	int chain_nr = chain->nr;
 	u8 cpumode = PERF_RECORD_MISC_USER;
-	int i, j, err;
+	int i, j, err, nr_entries;
 	int skip_idx = -1;
 	int first_call = 0;
 
@@ -1828,8 +1825,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 	 * Based on DWARF debug information, some architectures skip
 	 * a callchain entry saved by the kernel.
 	 */
-	if (chain->nr < sysctl_perf_event_max_stack)
-		skip_idx = arch_skip_callchain_idx(thread, chain);
+	skip_idx = arch_skip_callchain_idx(thread, chain);
 
 	/*
 	 * Add branches to call stack for easier browsing. This gives
@@ -1889,12 +1885,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 	}
 
 check_calls:
-	if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
-		pr_warning("corrupted callchain. skipping...\n");
-		return 0;
-	}
-
-	for (i = first_call; i < chain_nr; i++) {
+	for (i = first_call, nr_entries = 0;
+	     i < chain_nr && nr_entries < max_stack; i++) {
 		u64 ip;
 
 		if (callchain_param.order == ORDER_CALLEE)
@@ -1908,6 +1900,9 @@ check_calls:
 #endif
 		ip = chain->ips[j];
 
+		if (ip < PERF_CONTEXT_MAX)
+			++nr_entries;
+
 		err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
 
 		if (err)
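The reworked resolver stops rejecting whole callchains against the sysctl and instead caps them while walking: only real instruction pointers (ip < PERF_CONTEXT_MAX) count against max_stack, so context markers such as PERF_CONTEXT_KERNEL no longer eat into the budget. A standalone sketch with made-up addresses; the PERF_CONTEXT_* values are copied from the perf_event UAPI header:

#include <stdio.h>
#include <inttypes.h>

/* Values from include/uapi/linux/perf_event.h. */
#define PERF_CONTEXT_MAX	((uint64_t)-4095)
#define PERF_CONTEXT_KERNEL	((uint64_t)-128)
#define PERF_CONTEXT_USER	((uint64_t)-512)

int main(void)
{
	/* Hypothetical callchain: context markers interleaved with IPs. */
	uint64_t ips[] = { PERF_CONTEXT_KERNEL, 0xffffffff81000100ULL,
			   0xffffffff81000200ULL, PERF_CONTEXT_USER,
			   0x4004d0, 0x400600, 0x400700 };
	int chain_nr = sizeof(ips) / sizeof(ips[0]);
	int max_stack = 4, nr_entries, i;

	/* Count only real entries against max_stack, as the new loop
	 * does; the markers are consumed but use none of the budget. */
	for (i = 0, nr_entries = 0; i < chain_nr && nr_entries < max_stack; i++) {
		if (ips[i] < PERF_CONTEXT_MAX)
			++nr_entries;
		printf("%-6s 0x%" PRIx64 "\n",
		       ips[i] < PERF_CONTEXT_MAX ? "ip" : "marker", ips[i]);
	}
	printf("resolved %d of max %d real entries\n", nr_entries, max_stack);
	return 0;
}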
@@ -28,6 +28,7 @@ struct machine {
 	pid_t		  pid;
 	u16		  id_hdr_size;
 	bool		  comm_exec;
+	bool		  kptr_restrict_warned;
 	char		  *root_dir;
 	struct rb_root	  threads;
 	pthread_rwlock_t  threads_lock;
@@ -264,8 +264,7 @@ static SV *perl_process_callchain(struct perf_sample *sample,
 		goto exit;
 
 	if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
-				      sample, NULL, NULL,
-				      sysctl_perf_event_max_stack) != 0) {
+				      sample, NULL, NULL, scripting_max_stack) != 0) {
 		pr_err("Failed to resolve callchain. Skipping\n");
 		goto exit;
 	}
@@ -353,6 +353,88 @@ struct sort_entry sort_srcline = {
 	.se_width_idx	= HISTC_SRCLINE,
 };
 
+/* --sort srcline_from */
+
+static int64_t
+sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	if (!left->branch_info->srcline_from) {
+		struct map *map = left->branch_info->from.map;
+		if (!map)
+			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
+		else
+			left->branch_info->srcline_from = get_srcline(map->dso,
+					   map__rip_2objdump(map,
+							     left->branch_info->from.al_addr),
+					   left->branch_info->from.sym, true);
+	}
+	if (!right->branch_info->srcline_from) {
+		struct map *map = right->branch_info->from.map;
+		if (!map)
+			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
+		else
+			right->branch_info->srcline_from = get_srcline(map->dso,
+					     map__rip_2objdump(map,
+							       right->branch_info->from.al_addr),
+					     right->branch_info->from.sym, true);
+	}
+	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
+}
+
+static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
+					size_t size, unsigned int width)
+{
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
+}
+
+struct sort_entry sort_srcline_from = {
+	.se_header	= "From Source:Line",
+	.se_cmp		= sort__srcline_from_cmp,
+	.se_snprintf	= hist_entry__srcline_from_snprintf,
+	.se_width_idx	= HISTC_SRCLINE_FROM,
+};
+
+/* --sort srcline_to */
+
+static int64_t
+sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	if (!left->branch_info->srcline_to) {
+		struct map *map = left->branch_info->to.map;
+		if (!map)
+			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
+		else
+			left->branch_info->srcline_to = get_srcline(map->dso,
+					   map__rip_2objdump(map,
+							     left->branch_info->to.al_addr),
+					   left->branch_info->from.sym, true);
+	}
+	if (!right->branch_info->srcline_to) {
+		struct map *map = right->branch_info->to.map;
+		if (!map)
+			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
+		else
+			right->branch_info->srcline_to = get_srcline(map->dso,
+					     map__rip_2objdump(map,
+							       right->branch_info->to.al_addr),
+					     right->branch_info->to.sym, true);
+	}
+	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
+}
+
+static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
+					size_t size, unsigned int width)
+{
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
+}
+
+struct sort_entry sort_srcline_to = {
+	.se_header	= "To Source:Line",
+	.se_cmp		= sort__srcline_to_cmp,
+	.se_snprintf	= hist_entry__srcline_to_snprintf,
+	.se_width_idx	= HISTC_SRCLINE_TO,
+};
+
 /* --sort srcfile */
 
 static char no_srcfile[1];
@@ -1347,6 +1429,8 @@ static struct sort_dimension bstack_sort_dimensions[] = {
 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
 	DIM(SORT_ABORT, "abort", sort_abort),
 	DIM(SORT_CYCLES, "cycles", sort_cycles),
+	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
+	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
 };
 
 #undef DIM
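Note the pattern in the new comparators: the resolved source line is cached in branch_info on first comparison, so the expensive addr2line-style lookup runs at most once per entry even though sorting compares each entry many times. A standalone sketch of that resolve-on-compare memoization (hypothetical names, not the perf code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The resolved string lives in the entry itself, like srcline_from/to
 * in branch_info. NULL means "not resolved yet". */
struct entry {
	long addr;
	char *srcline;
};

static char *resolve_srcline(long addr)
{
	char buf[64];

	printf("resolving 0x%lx (expensive)\n", addr);	/* addr2line stand-in */
	snprintf(buf, sizeof(buf), "file.c:%ld", addr % 100);
	return strdup(buf);
}

static const char *entry__srcline(struct entry *e)
{
	if (!e->srcline)
		e->srcline = resolve_srcline(e->addr);
	return e->srcline;
}

static int entry__cmp(struct entry *a, struct entry *b)
{
	return strcmp(entry__srcline(b), entry__srcline(a));
}

int main(void)
{
	struct entry a = { .addr = 0x1234 }, b = { .addr = 0x5678 };

	entry__cmp(&a, &b);	/* resolves both entries */
	entry__cmp(&a, &b);	/* pure strcmp, nothing resolved again */
	free(a.srcline);
	free(b.srcline);
	return 0;
}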
@@ -215,6 +215,8 @@ enum sort_type {
 	SORT_ABORT,
 	SORT_IN_TX,
 	SORT_CYCLES,
+	SORT_SRCLINE_FROM,
+	SORT_SRCLINE_TO,
 
 	/* memory mode specific sort keys */
 	__SORT_MEMORY_MODE,
@@ -94,7 +94,8 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 {
 	int ctx = evsel_context(counter);
 
-	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
+	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
+	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
 		update_stats(&runtime_nsecs_stats[cpu], count[0]);
 	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
 		update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
@@ -188,7 +189,7 @@ static void print_stalled_cycles_backend(int cpu,
 
 	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
 
-	out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
+	out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
 }
 
 static void print_branch_misses(int cpu,
@@ -444,7 +445,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
 			ratio = total / avg;
 
 		print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
-	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
+	} else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
+		   perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
 		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
 			print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
 				     avg / ratio);
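With SW_CPU_CLOCK now matched alongside SW_TASK_CLOCK, cpu-clock counters also feed runtime_nsecs_stats and therefore the "CPUs utilized" line, which is simply the counted clock time divided by wall time. A toy computation of that ratio with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical run: 8 s of cpu-clock across 2 s of wall time. */
	double clock_ns = 8e9;	/* summed SW_TASK_CLOCK or SW_CPU_CLOCK */
	double wall_ns  = 2e9;

	/* Same arithmetic as the "CPUs utilized" shadow metric. */
	printf("%8.3f CPUs utilized\n", clock_ns / wall_ns);
	return 0;
}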
@@ -1662,8 +1662,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 
 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
 
-	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
-		  sbuild_id);
+	scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
+		  DSO__NAME_KCORE, sbuild_id);
 
 	/* Use /proc/kallsyms if possible */
 	if (is_host) {
@@ -1699,8 +1699,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 		if (!find_matching_kcore(map, path, sizeof(path)))
 			return strdup(path);
 
-		scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
-			  buildid_dir, sbuild_id);
+		scnprintf(path, sizeof(path), "%s/%s/%s",
+			  buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
 
 		if (access(path, F_OK)) {
 			pr_err("No kallsyms or vmlinux with build-id %s was found\n",
@@ -1769,7 +1769,7 @@ do_kallsyms:
 
 	if (err > 0 && !dso__is_kcore(dso)) {
 		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
-		dso__set_long_name(dso, "[kernel.kallsyms]", false);
+		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
 		map__fixup_start(map);
 		map__fixup_end(map);
 	}
@@ -2033,3 +2033,26 @@ void symbol__exit(void)
 	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
 	symbol_conf.initialized = false;
 }
+
+int symbol__config_symfs(const struct option *opt __maybe_unused,
+			 const char *dir, int unset __maybe_unused)
+{
+	char *bf = NULL;
+	int ret;
+
+	symbol_conf.symfs = strdup(dir);
+	if (symbol_conf.symfs == NULL)
+		return -ENOMEM;
+
+	/* skip the locally configured cache if a symfs is given, and
+	 * config buildid dir to symfs/.debug
+	 */
+	ret = asprintf(&bf, "%s/%s", dir, ".debug");
+	if (ret < 0)
+		return -ENOMEM;
+
+	set_buildid_dir(bf);
+
+	free(bf);
+	return 0;
+}
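symbol__config_symfs() both records the symfs root and redirects the build-id cache to <symfs>/.debug, so --symfs no longer silently falls back to the local ~/.debug cache. A standalone sketch of the same path plumbing, using stand-in globals rather than perf's symbol_conf/buildid machinery:

#define _GNU_SOURCE		/* asprintf */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the perf config plumbing. */
static char *symfs;
static char *buildid_dir;

static void set_buildid_dir(const char *dir)
{
	free(buildid_dir);
	buildid_dir = strdup(dir);
}

/* Mirrors the shape of symbol__config_symfs(): remember the symfs root
 * and point the build-id cache at <symfs>/.debug instead of ~/.debug. */
static int config_symfs(const char *dir)
{
	char *bf = NULL;

	symfs = strdup(dir);
	if (symfs == NULL)
		return -1;

	if (asprintf(&bf, "%s/%s", dir, ".debug") < 0)
		return -1;

	set_buildid_dir(bf);
	free(bf);
	return 0;
}

int main(void)
{
	if (config_symfs("/srv/symbols"))
		return 1;
	printf("symfs=%s\nbuildid_dir=%s\n", symfs, buildid_dir);
	return 0;
}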
@@ -44,6 +44,9 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
 #define DMGL_ANSI        (1 << 1)       /* Include const, volatile, etc */
 #endif
 
+#define DSO__NAME_KALLSYMS	"[kernel.kallsyms]"
+#define DSO__NAME_KCORE		"[kernel.kcore]"
+
 /** struct symbol - symtab entry
  *
  * @ignore - resolvable but tools ignore it (e.g. idle routines)
@@ -183,6 +186,8 @@ struct branch_info {
 	struct addr_map_symbol from;
 	struct addr_map_symbol to;
 	struct branch_flags flags;
+	char			*srcline_from;
+	char			*srcline_to;
 };
 
 struct mem_info {
@@ -287,6 +292,8 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type);
 bool symbol__restricted_filename(const char *filename,
 				 const char *restricted_filename);
 bool symbol__is_idle(struct symbol *sym);
+int symbol__config_symfs(const struct option *opt __maybe_unused,
+			 const char *dir, int unset __maybe_unused);
 
 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
 		  struct symsrc *runtime_ss, symbol_filter_t filter,
@@ -27,7 +27,6 @@ struct perf_top {
 	int		   max_stack;
 	bool		   hide_kernel_symbols, hide_user_symbols, zero;
 	bool		   use_tui, use_stdio;
-	bool		   kptr_restrict_warned;
 	bool		   vmlinux_warned;
 	bool		   dump_symtab;
 	struct hist_entry  *sym_filter_entry;
@@ -33,7 +33,8 @@ struct callchain_param callchain_param = {
 unsigned int page_size;
 int cacheline_size;
 
-unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
 
 bool test_attr__enabled;
@@ -261,7 +261,8 @@ void sighandler_dump_stack(int sig);
 
 extern unsigned int page_size;
 extern int cacheline_size;
-extern unsigned int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
 
 struct parse_tag {
 	char tag;