Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "Lots of tooling updates - too many to list, here's a few highlights:

   - Various subcommand updates to 'perf trace', 'perf report', 'perf
     record', 'perf annotate', 'perf script', 'perf test', etc.

   - CPU and NUMA topology and affinity handling improvements,

   - HW tracing and HW support updates:
      - Intel PT updates
      - ARM CoreSight updates
      - vendor HW event updates

   - BPF updates

   - Tons of infrastructure updates, both on the build system and the
     library support side

   - Documentation updates.

   - ... and lots of other changes, see the changelog for details.

  Kernel side updates:

   - Tighten up kprobes blacklist handling, reduce the number of places
     where developers can install a kprobe and hang/crash the system.

   - Fix/enhance vma address filter handling.

   - Various PMU driver updates, small fixes and additions.

   - refcount_t conversions

   - BPF updates

   - error code propagation enhancements

   - misc other changes"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (238 commits)
  perf script python: Add Python3 support to syscall-counts-by-pid.py
  perf script python: Add Python3 support to syscall-counts.py
  perf script python: Add Python3 support to stat-cpi.py
  perf script python: Add Python3 support to stackcollapse.py
  perf script python: Add Python3 support to sctop.py
  perf script python: Add Python3 support to powerpc-hcalls.py
  perf script python: Add Python3 support to net_dropmonitor.py
  perf script python: Add Python3 support to mem-phys-addr.py
  perf script python: Add Python3 support to failed-syscalls-by-pid.py
  perf script python: Add Python3 support to netdev-times.py
  perf tools: Add perf_exe() helper to find perf binary
  perf script: Handle missing fields with -F +..
  perf data: Add perf_data__open_dir_data function
  perf data: Add perf_data__(create_dir|close_dir) functions
  perf data: Fail check_backup in case of error
  perf data: Make check_backup work over directories
  perf tools: Add rm_rf_perf_data function
  perf tools: Add pattern name checking to rm_rf
  perf tools: Add depth checking to rm_rf
  perf data: Add global path holder
  ...
@@ -539,7 +539,7 @@ bpf_get_prog_addr_region(const struct bpf_prog *prog,
 	*symbol_end = addr + hdr->pages * PAGE_SIZE;
 }
 
-static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
+void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 {
 	const char *end = sym + KSYM_NAME_LEN;
 	const struct btf_type *type;
@@ -1258,6 +1258,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
 		bpf_prog_kallsyms_del_all(prog);
@@ -1631,6 +1632,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 	}
 
 	bpf_prog_kallsyms_add(prog);
+	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
 	return err;
 
 free_used_maps:
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance events callchain code, extracted from core.c:
  *
@@ -5,8 +6,6 @@
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- *
- * For licensing details see kernel-base/COPYING
  */
 
 #include <linux/perf_event.h>
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance events core code:
  *
@@ -5,8 +6,6 @@
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- *
- * For licensing details see kernel-base/COPYING
  */
 
 #include <linux/fs.h>
@@ -385,6 +384,8 @@ static atomic_t nr_namespaces_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
 static atomic_t nr_freq_events __read_mostly;
 static atomic_t nr_switch_events __read_mostly;
+static atomic_t nr_ksymbol_events __read_mostly;
+static atomic_t nr_bpf_events __read_mostly;
 
 static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
@@ -1171,7 +1172,7 @@ static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
 
 static void get_ctx(struct perf_event_context *ctx)
 {
-	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
+	refcount_inc(&ctx->refcount);
 }
 
 static void free_ctx(struct rcu_head *head)
@@ -1185,7 +1186,7 @@ static void free_ctx(struct rcu_head *head)
 
 static void put_ctx(struct perf_event_context *ctx)
 {
-	if (atomic_dec_and_test(&ctx->refcount)) {
+	if (refcount_dec_and_test(&ctx->refcount)) {
 		if (ctx->parent_ctx)
 			put_ctx(ctx->parent_ctx);
 		if (ctx->task && ctx->task != TASK_TOMBSTONE)
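The two hunks above follow the series' standard atomic_t -> refcount_t conversion called out in the changelog: refcount_t keeps the same get/put shape but warns on increment-from-zero and saturates instead of wrapping on overflow. A minimal kernel-style sketch of the pattern (the `my_obj` structure is hypothetical, not from this diff):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct my_obj {
            refcount_t refcount;            /* was: atomic_t refcount; */
    };

    static void my_obj_get(struct my_obj *obj)
    {
            /* was: WARN_ON(!atomic_inc_not_zero(&obj->refcount)); */
            refcount_inc(&obj->refcount);   /* WARNs on 0, saturates on overflow */
    }

    static void my_obj_put(struct my_obj *obj)
    {
            /* was: if (atomic_dec_and_test(&obj->refcount)) */
            if (refcount_dec_and_test(&obj->refcount))
                    kfree(obj);             /* last reference dropped */
    }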
@@ -1254,6 +1255,7 @@ static void put_ctx(struct perf_event_context *ctx)
  *	  perf_event_context::lock
  *	    perf_event::mmap_mutex
  *	    mmap_sem
+ *	      perf_addr_filters_head::lock
  *
  *    cpu_hotplug_lock
  *      pmus_lock
@@ -1267,7 +1269,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
 again:
 	rcu_read_lock();
 	ctx = READ_ONCE(event->ctx);
-	if (!atomic_inc_not_zero(&ctx->refcount)) {
+	if (!refcount_inc_not_zero(&ctx->refcount)) {
 		rcu_read_unlock();
 		goto again;
 	}
@@ -1400,7 +1402,7 @@ retry:
 	}
 
 	if (ctx->task == TASK_TOMBSTONE ||
-	    !atomic_inc_not_zero(&ctx->refcount)) {
+	    !refcount_inc_not_zero(&ctx->refcount)) {
 		raw_spin_unlock(&ctx->lock);
 		ctx = NULL;
 	} else {
@@ -2797,7 +2799,7 @@ static int perf_event_stop(struct perf_event *event, int restart)
  *
  * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
  *      we update the addresses of corresponding vmas in
- *      event::addr_filters_offs array and bump the event::addr_filters_gen;
+ *      event::addr_filter_ranges array and bump the event::addr_filters_gen;
  * (p2) when an event is scheduled in (pmu::add), it calls
  *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
  *      if the generation has changed since the previous call.
@@ -4056,7 +4058,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
 	INIT_LIST_HEAD(&ctx->event_list);
 	INIT_LIST_HEAD(&ctx->pinned_active);
 	INIT_LIST_HEAD(&ctx->flexible_active);
-	atomic_set(&ctx->refcount, 1);
+	refcount_set(&ctx->refcount, 1);
 }
 
 static struct perf_event_context *
@@ -4235,7 +4237,7 @@ static bool is_sb_event(struct perf_event *event)
 
 	if (attr->mmap || attr->mmap_data || attr->mmap2 ||
 	    attr->comm || attr->comm_exec ||
-	    attr->task ||
+	    attr->task || attr->ksymbol ||
 	    attr->context_switch)
 		return true;
 	return false;
@@ -4305,6 +4307,10 @@ static void unaccount_event(struct perf_event *event)
 		dec = true;
 	if (has_branch_stack(event))
 		dec = true;
+	if (event->attr.ksymbol)
+		atomic_dec(&nr_ksymbol_events);
+	if (event->attr.bpf_event)
+		atomic_dec(&nr_bpf_events);
 
 	if (dec) {
 		if (!atomic_add_unless(&perf_sched_count, -1, 1))
@@ -4440,7 +4446,7 @@ static void _free_event(struct perf_event *event)
 
 	perf_event_free_bpf_prog(event);
 	perf_addr_filters_splice(event, NULL);
-	kfree(event->addr_filters_offs);
+	kfree(event->addr_filter_ranges);
 
 	if (event->destroy)
 		event->destroy(event);
@@ -5396,7 +5402,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
 	if (rb) {
-		if (!atomic_inc_not_zero(&rb->refcount))
+		if (!refcount_inc_not_zero(&rb->refcount))
 			rb = NULL;
 	}
 	rcu_read_unlock();
@@ -5406,7 +5412,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 void ring_buffer_put(struct ring_buffer *rb)
 {
-	if (!atomic_dec_and_test(&rb->refcount))
+	if (!refcount_dec_and_test(&rb->refcount))
 		return;
 
 	WARN_ON_ONCE(!list_empty(&rb->event_list));
@@ -5471,7 +5477,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
 		/* this has to be the last one */
 		rb_free_aux(rb);
-		WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+		WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
 
 		mutex_unlock(&event->mmap_mutex);
 	}
@@ -6497,7 +6503,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 		data->phys_addr = perf_virt_to_phys(data->addr);
 }
 
-static __always_inline void
+static __always_inline int
 __perf_event_output(struct perf_event *event,
 		    struct perf_sample_data *data,
 		    struct pt_regs *regs,
@@ -6507,13 +6513,15 @@ __perf_event_output(struct perf_event *event,
 {
 	struct perf_output_handle handle;
 	struct perf_event_header header;
+	int err;
 
 	/* protect the callchain buffers */
 	rcu_read_lock();
 
 	perf_prepare_sample(&header, data, event, regs);
 
-	if (output_begin(&handle, event, header.size))
+	err = output_begin(&handle, event, header.size);
+	if (err)
 		goto exit;
 
 	perf_output_sample(&handle, &header, data, event);
@@ -6522,6 +6530,7 @@ __perf_event_output(struct perf_event *event,
 
 exit:
 	rcu_read_unlock();
+	return err;
 }
 
 void
@@ -6540,12 +6549,12 @@ perf_event_output_backward(struct perf_event *event,
 	__perf_event_output(event, data, regs, perf_output_begin_backward);
 }
 
-void
+int
 perf_event_output(struct perf_event *event,
 		  struct perf_sample_data *data,
 		  struct pt_regs *regs)
 {
-	__perf_event_output(event, data, regs, perf_output_begin);
+	return __perf_event_output(event, data, regs, perf_output_begin);
 }
 
 /*
@@ -6686,7 +6695,8 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
 	raw_spin_lock_irqsave(&ifh->lock, flags);
 	list_for_each_entry(filter, &ifh->list, entry) {
 		if (filter->path.dentry) {
-			event->addr_filters_offs[count] = 0;
+			event->addr_filter_ranges[count].start = 0;
+			event->addr_filter_ranges[count].size = 0;
 			restart++;
 		}
 
@@ -7366,28 +7376,47 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
 	return true;
 }
 
+static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
+					struct vm_area_struct *vma,
+					struct perf_addr_filter_range *fr)
+{
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+	struct file *file = vma->vm_file;
+
+	if (!perf_addr_filter_match(filter, file, off, vma_size))
+		return false;
+
+	if (filter->offset < off) {
+		fr->start = vma->vm_start;
+		fr->size = min(vma_size, filter->size - (off - filter->offset));
+	} else {
+		fr->start = vma->vm_start + filter->offset - off;
+		fr->size = min(vma->vm_end - fr->start, filter->size);
+	}
+
+	return true;
+}
+
 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
 {
 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
 	struct vm_area_struct *vma = data;
-	unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
-	struct file *file = vma->vm_file;
 	struct perf_addr_filter *filter;
 	unsigned int restart = 0, count = 0;
+	unsigned long flags;
 
 	if (!has_addr_filter(event))
 		return;
 
-	if (!file)
+	if (!vma->vm_file)
 		return;
 
 	raw_spin_lock_irqsave(&ifh->lock, flags);
 	list_for_each_entry(filter, &ifh->list, entry) {
-		if (perf_addr_filter_match(filter, file, off,
-					   vma->vm_end - vma->vm_start)) {
-			event->addr_filters_offs[count] = vma->vm_start;
+		if (perf_addr_filter_vma_adjust(filter, vma,
+						&event->addr_filter_ranges[count]))
 			restart++;
-		}
 
 		count++;
 	}
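perf_addr_filter_vma_adjust() above clamps the filter's file-offset range against the window of the file that the vma actually maps, which is what makes address filters work on vmas with a non-zero pgoff. A worked example with illustrative numbers only: for a filter at file offset 0x1000 with size 0x2000, matched against a vma mapping file offset off = 0x0 at vm_start = 0x400000 with vm_end = 0x403000, the else-branch applies (0x1000 >= 0x0), so fr->start = 0x400000 + 0x1000 - 0x0 = 0x401000 and fr->size = min(0x403000 - 0x401000, 0x2000) = 0x2000. If instead the vma started at file offset off = 0x2000, past the filter's start, the first branch applies: fr->start = vm_start and fr->size = min(vma_size, 0x2000 - (0x2000 - 0x1000)) = min(vma_size, 0x1000), i.e. only the tail of the filter range that falls inside the vma.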
@@ -7658,6 +7687,207 @@ static void perf_log_throttle(struct perf_event *event, int enable)
 	perf_output_end(&handle);
 }
 
+/*
+ * ksymbol register/unregister tracking
+ */
+
+struct perf_ksymbol_event {
+	const char	*name;
+	int		name_len;
+	struct {
+		struct perf_event_header	header;
+		u64				addr;
+		u32				len;
+		u16				ksym_type;
+		u16				flags;
+	} event_id;
+};
+
+static int perf_event_ksymbol_match(struct perf_event *event)
+{
+	return event->attr.ksymbol;
+}
+
+static void perf_event_ksymbol_output(struct perf_event *event, void *data)
+{
+	struct perf_ksymbol_event *ksymbol_event = data;
+	struct perf_output_handle handle;
+	struct perf_sample_data sample;
+	int ret;
+
+	if (!perf_event_ksymbol_match(event))
+		return;
+
+	perf_event_header__init_id(&ksymbol_event->event_id.header,
+				   &sample, event);
+	ret = perf_output_begin(&handle, event,
+				ksymbol_event->event_id.header.size);
+	if (ret)
+		return;
+
+	perf_output_put(&handle, ksymbol_event->event_id);
+	__output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len);
+	perf_event__output_id_sample(event, &handle, &sample);
+
+	perf_output_end(&handle);
+}
+
+void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
+			const char *sym)
+{
+	struct perf_ksymbol_event ksymbol_event;
+	char name[KSYM_NAME_LEN];
+	u16 flags = 0;
+	int name_len;
+
+	if (!atomic_read(&nr_ksymbol_events))
+		return;
+
+	if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX ||
+	    ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
+		goto err;
+
+	strlcpy(name, sym, KSYM_NAME_LEN);
+	name_len = strlen(name) + 1;
+	while (!IS_ALIGNED(name_len, sizeof(u64)))
+		name[name_len++] = '\0';
+	BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64));
+
+	if (unregister)
+		flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER;
+
+	ksymbol_event = (struct perf_ksymbol_event){
+		.name = name,
+		.name_len = name_len,
+		.event_id = {
+			.header = {
+				.type = PERF_RECORD_KSYMBOL,
+				.size = sizeof(ksymbol_event.event_id) +
+					name_len,
+			},
+			.addr = addr,
+			.len = len,
+			.ksym_type = ksym_type,
+			.flags = flags,
+		},
+	};
+
+	perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
+	return;
+err:
+	WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
+}
+
+/*
+ * bpf program load/unload tracking
+ */
+
+struct perf_bpf_event {
+	struct bpf_prog	*prog;
+	struct {
+		struct perf_event_header	header;
+		u16				type;
+		u16				flags;
+		u32				id;
+		u8				tag[BPF_TAG_SIZE];
+	} event_id;
+};
+
+static int perf_event_bpf_match(struct perf_event *event)
+{
+	return event->attr.bpf_event;
+}
+
+static void perf_event_bpf_output(struct perf_event *event, void *data)
+{
+	struct perf_bpf_event *bpf_event = data;
+	struct perf_output_handle handle;
+	struct perf_sample_data sample;
+	int ret;
+
+	if (!perf_event_bpf_match(event))
+		return;
+
+	perf_event_header__init_id(&bpf_event->event_id.header,
+				   &sample, event);
+	ret = perf_output_begin(&handle, event,
+				bpf_event->event_id.header.size);
+	if (ret)
+		return;
+
+	perf_output_put(&handle, bpf_event->event_id);
+	perf_event__output_id_sample(event, &handle, &sample);
+
+	perf_output_end(&handle);
+}
+
+static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
+					 enum perf_bpf_event_type type)
+{
+	bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
+	char sym[KSYM_NAME_LEN];
+	int i;
+
+	if (prog->aux->func_cnt == 0) {
+		bpf_get_prog_name(prog, sym);
+		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
+				   (u64)(unsigned long)prog->bpf_func,
+				   prog->jited_len, unregister, sym);
+	} else {
+		for (i = 0; i < prog->aux->func_cnt; i++) {
+			struct bpf_prog *subprog = prog->aux->func[i];
+
+			bpf_get_prog_name(subprog, sym);
+			perf_event_ksymbol(
+				PERF_RECORD_KSYMBOL_TYPE_BPF,
+				(u64)(unsigned long)subprog->bpf_func,
+				subprog->jited_len, unregister, sym);
+		}
+	}
+}
+
+void perf_event_bpf_event(struct bpf_prog *prog,
+			  enum perf_bpf_event_type type,
+			  u16 flags)
+{
+	struct perf_bpf_event bpf_event;
+
+	if (type <= PERF_BPF_EVENT_UNKNOWN ||
+	    type >= PERF_BPF_EVENT_MAX)
+		return;
+
+	switch (type) {
+	case PERF_BPF_EVENT_PROG_LOAD:
+	case PERF_BPF_EVENT_PROG_UNLOAD:
+		if (atomic_read(&nr_ksymbol_events))
+			perf_event_bpf_emit_ksymbols(prog, type);
+		break;
+	default:
+		break;
+	}
+
+	if (!atomic_read(&nr_bpf_events))
+		return;
+
+	bpf_event = (struct perf_bpf_event){
+		.prog = prog,
+		.event_id = {
+			.header = {
+				.type = PERF_RECORD_BPF_EVENT,
+				.size = sizeof(bpf_event.event_id),
+			},
+			.type = type,
+			.flags = flags,
+			.id = prog->aux->id,
+		},
+	};
+
+	BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
+
+	memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
+	perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
+}
+
 void perf_event_itrace_started(struct perf_event *event)
 {
 	event->attach_state |= PERF_ATTACH_ITRACE;
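In perf_event_ksymbol() above, the symbol name is NUL-padded to a multiple of sizeof(u64) so the sample-ID fields emitted after it stay 8-byte aligned in the ring buffer. For example (illustrative name lengths only): a 9-character name gives name_len = 10 after the terminating NUL, which the IS_ALIGNED() loop pads to 16; a 7-character name gives name_len = 8 and needs no padding. header.size then covers the fixed event_id plus the padded name, so consumers can walk records by size alone.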
@@ -8776,26 +9006,19 @@ static void perf_addr_filters_splice(struct perf_event *event,
  * @filter; if so, adjust filter's address range.
  * Called with mm::mmap_sem down for reading.
  */
-static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
-					    struct mm_struct *mm)
+static void perf_addr_filter_apply(struct perf_addr_filter *filter,
+				   struct mm_struct *mm,
+				   struct perf_addr_filter_range *fr)
 {
 	struct vm_area_struct *vma;
 
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		struct file *file = vma->vm_file;
-		unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
-		unsigned long vma_size = vma->vm_end - vma->vm_start;
-
-		if (!file)
+		if (!vma->vm_file)
 			continue;
 
-		if (!perf_addr_filter_match(filter, file, off, vma_size))
-			continue;
-
-		return vma->vm_start;
+		if (perf_addr_filter_vma_adjust(filter, vma, fr))
+			return;
 	}
-
-	return 0;
 }
 
 /*
@@ -8829,15 +9052,15 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 
 	raw_spin_lock_irqsave(&ifh->lock, flags);
 	list_for_each_entry(filter, &ifh->list, entry) {
-		event->addr_filters_offs[count] = 0;
+		event->addr_filter_ranges[count].start = 0;
+		event->addr_filter_ranges[count].size = 0;
 
 		/*
 		 * Adjust base offset if the filter is associated to a binary
 		 * that needs to be mapped:
 		 */
 		if (filter->path.dentry)
-			event->addr_filters_offs[count] =
-				perf_addr_filter_apply(filter, mm);
+			perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
 
 		count++;
 	}
@@ -9788,6 +10011,15 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 	if (ctx)
 		perf_event_ctx_unlock(event->group_leader, ctx);
 
+	if (!ret) {
+		if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
+		    event_has_any_exclude_flag(event)) {
+			if (event->destroy)
+				event->destroy(event);
+			ret = -EINVAL;
+		}
+	}
+
 	if (ret)
 		module_put(pmu->module);
 
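The new check above rejects events that set any exclusion bit when the PMU advertises PERF_PMU_CAP_NO_EXCLUDE, i.e. when it cannot honour exclude_* filtering in hardware. event_has_any_exclude_flag() is presumably a small test over the attr bits, along these lines (a sketch, not necessarily the exact helper body):

    /* sketch: true if any exclude_* bit is set in the event's attr */
    static inline bool event_has_any_exclude_flag(struct perf_event *event)
    {
            struct perf_event_attr *attr = &event->attr;

            return attr->exclude_idle || attr->exclude_user ||
                   attr->exclude_kernel || attr->exclude_hv ||
                   attr->exclude_guest || attr->exclude_host;
    }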
@@ -9916,6 +10148,10 @@ static void account_event(struct perf_event *event)
 		inc = true;
 	if (is_cgroup_event(event))
 		inc = true;
+	if (event->attr.ksymbol)
+		atomic_inc(&nr_ksymbol_events);
+	if (event->attr.bpf_event)
+		atomic_inc(&nr_bpf_events);
 
 	if (inc) {
 		/*
@@ -10098,14 +10334,28 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		goto err_pmu;
 
 	if (has_addr_filter(event)) {
-		event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
-						   sizeof(unsigned long),
-						   GFP_KERNEL);
-		if (!event->addr_filters_offs) {
+		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
+						    sizeof(struct perf_addr_filter_range),
+						    GFP_KERNEL);
+		if (!event->addr_filter_ranges) {
 			err = -ENOMEM;
 			goto err_per_task;
 		}
 
+		/*
+		 * Clone the parent's vma offsets: they are valid until exec()
+		 * even if the mm is not shared with the parent.
+		 */
+		if (event->parent) {
+			struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
+
+			raw_spin_lock_irq(&ifh->lock);
+			memcpy(event->addr_filter_ranges,
+			       event->parent->addr_filter_ranges,
+			       pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
+			raw_spin_unlock_irq(&ifh->lock);
+		}
+
 		/* force hw sync on the address filters */
 		event->addr_filters_gen = 1;
 	}
@@ -10124,7 +10374,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	return event;
 
 err_addr_filters:
-	kfree(event->addr_filters_offs);
+	kfree(event->addr_filter_ranges);
 
 err_per_task:
 	exclusive_event_destroy(event);
@@ -10407,7 +10657,7 @@ __perf_event_ctx_lock_double(struct perf_event *group_leader,
 again:
 	rcu_read_lock();
 	gctx = READ_ONCE(group_leader->ctx);
-	if (!atomic_inc_not_zero(&gctx->refcount)) {
+	if (!refcount_inc_not_zero(&gctx->refcount)) {
 		rcu_read_unlock();
 		goto again;
 	}
@@ -1,18 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright (C) 2007 Alan Stern
  * Copyright (C) IBM Corporation, 2009
  * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
@@ -4,13 +4,14 @@
 
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
+#include <linux/refcount.h>
 
 /* Buffer handling */
 
 #define RING_BUFFER_WRITABLE		0x01
 
 struct ring_buffer {
-	atomic_t			refcount;
+	refcount_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
@@ -48,7 +49,7 @@ struct ring_buffer {
 	atomic_t			aux_mmap_count;
 	unsigned long			aux_mmap_locked;
 	void				(*free_aux)(void *);
-	atomic_t			aux_refcount;
+	refcount_t			aux_refcount;
 	void				**aux_pages;
 	void				*aux_priv;
 
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Performance events ring-buffer code:
  *
@@ -5,8 +6,6 @@
  * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- *
- * For licensing details see kernel-base/COPYING
  */
 
 #include <linux/perf_event.h>
@@ -285,7 +284,7 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 	else
 		rb->overwrite = 1;
 
-	atomic_set(&rb->refcount, 1);
+	refcount_set(&rb->refcount, 1);
 
 	INIT_LIST_HEAD(&rb->event_list);
 	spin_lock_init(&rb->event_lock);
@@ -358,7 +357,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 	if (!atomic_read(&rb->aux_mmap_count))
 		goto err;
 
-	if (!atomic_inc_not_zero(&rb->aux_refcount))
+	if (!refcount_inc_not_zero(&rb->aux_refcount))
 		goto err;
 
 	/*
@@ -658,7 +657,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 			goto out;
 	}
 
-	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
+	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
 					     overwrite);
 	if (!rb->aux_priv)
 		goto out;
@@ -671,7 +670,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 	 * we keep a refcount here to make sure either of the two can
 	 * reference them safely.
 	 */
-	atomic_set(&rb->aux_refcount, 1);
+	refcount_set(&rb->aux_refcount, 1);
 
 	rb->aux_overwrite = overwrite;
 	rb->aux_watermark = watermark;
@@ -690,7 +689,7 @@ out:
 
 void rb_free_aux(struct ring_buffer *rb)
 {
-	if (atomic_dec_and_test(&rb->aux_refcount))
+	if (refcount_dec_and_test(&rb->aux_refcount))
 		__rb_free_aux(rb);
 }
 
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * User-space Probes (UProbes)
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
  * Copyright (C) IBM Corporation, 2008-2012
  * Authors:
  *	Srikar Dronamraju
@@ -494,7 +494,7 @@ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
 
 static int get_ksymbol_bpf(struct kallsym_iter *iter)
 {
-	iter->module_name[0] = '\0';
+	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
 	iter->exported = 0;
 	return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
 			       &iter->value, &iter->type,
@@ -1396,7 +1396,7 @@ bool __weak arch_within_kprobe_blacklist(unsigned long addr)
 	       addr < (unsigned long)__kprobes_text_end;
 }
 
-bool within_kprobe_blacklist(unsigned long addr)
+static bool __within_kprobe_blacklist(unsigned long addr)
 {
 	struct kprobe_blacklist_entry *ent;
 
@@ -1410,7 +1410,26 @@ bool within_kprobe_blacklist(unsigned long addr)
 		if (addr >= ent->start_addr && addr < ent->end_addr)
 			return true;
 	}
 	return false;
 }
+
+bool within_kprobe_blacklist(unsigned long addr)
+{
+	char symname[KSYM_NAME_LEN], *p;
+
+	if (__within_kprobe_blacklist(addr))
+		return true;
+
+	/* Check if the address is on a suffixed-symbol */
+	if (!lookup_symbol_name(addr, symname)) {
+		p = strchr(symname, '.');
+		if (!p)
+			return false;
+		*p = '\0';
+		addr = (unsigned long)kprobe_lookup_name(symname, 0);
+		if (addr)
+			return __within_kprobe_blacklist(addr);
+	}
+	return false;
+}
 
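The suffix handling added above covers compiler-generated clones: GCC emits symbols such as a hypothetical "do_fault.isra.5" or "setup_once.constprop.0" (illustrative names), which show up in kallsyms under the suffixed name while the blacklist stores only the plain one. Truncating at the first '.' and re-resolving via kprobe_lookup_name() lets the blacklist also reject kprobes placed on such optimized clones, which is part of the "tighten up kprobes blacklist handling" work in the changelog.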
@@ -52,6 +52,7 @@
 #include <linux/jhash.h>
 #include <linux/nmi.h>
 #include <linux/rcupdate.h>
+#include <linux/kprobes.h>
 
 #include <asm/sections.h>
 
@@ -3161,6 +3162,7 @@ void lockdep_hardirqs_on(unsigned long ip)
 	__trace_hardirqs_on_caller(ip);
 	current->lockdep_recursion = 0;
 }
+NOKPROBE_SYMBOL(lockdep_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
@@ -3190,6 +3192,7 @@ void lockdep_hardirqs_off(unsigned long ip)
 	} else
 		debug_atomic_inc(redundant_hardirqs_off);
 }
+NOKPROBE_SYMBOL(lockdep_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -4007,7 +4010,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 0;
 }
 
-static int __lock_is_held(const struct lockdep_map *lock, int read)
+static nokprobe_inline
+int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -4240,6 +4244,7 @@ int lock_is_held_type(const struct lockdep_map *lock, int read)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(lock_is_held_type);
+NOKPROBE_SYMBOL(lock_is_held_type);
 
 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {
@@ -50,6 +50,7 @@
 #include <linux/ftrace.h>
 #include <linux/tick.h>
 #include <linux/sysrq.h>
+#include <linux/kprobes.h>
 
 #include "tree.h"
 #include "rcu.h"
@@ -882,6 +883,7 @@ void rcu_nmi_enter(void)
 {
 	rcu_nmi_enter_common(false);
 }
+NOKPROBE_SYMBOL(rcu_nmi_enter);
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -39,6 +39,7 @@
 #include <linux/tick.h>
 #include <linux/rcupdate_wait.h>
 #include <linux/sched/isolation.h>
+#include <linux/kprobes.h>
 
 #define CREATE_TRACE_POINTS
 
@@ -236,6 +237,7 @@ int notrace debug_lockdep_rcu_enabled(void)
 	       current->lockdep_recursion == 0;
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
 
 /**
  * rcu_read_lock_held() - might we be in RCU read-side critical section?
@@ -431,8 +431,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
-	perf_event_output(event, sd, regs);
-	return 0;
+	return perf_event_output(event, sd, regs);
 }
 
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
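This is the consumer of the perf_event_output() signature change above: with __perf_event_output() now returning the perf_output_begin() result, a full ring buffer surfaces to the BPF program as a nonzero return (e.g. -ENOSPC) instead of a silent drop, matching the "error code propagation enhancements" in the changelog. An illustrative BPF-side sketch (map, section and program names are hypothetical, not from this diff):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
            __uint(key_size, sizeof(int));
            __uint(value_size, sizeof(__u32));
    } events SEC(".maps");

    SEC("kprobe/do_sys_open")
    int count_open(struct pt_regs *ctx)
    {
            __u32 sample = 1;
            long err;

            err = bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                                        &sample, sizeof(sample));
            if (err)        /* now includes e.g. -ENOSPC: sample was dropped */
                    bpf_printk("perf_event_output: %ld", err);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";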
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 
 #include "trace.h"
 
@@ -365,7 +366,7 @@ out:
 	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
-static inline void
+static nokprobe_inline void
 start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
@@ -401,7 +402,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 	atomic_dec(&data->disabled);
 }
 
-static inline void
+static nokprobe_inline void
 stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
 {
 	int cpu;
@@ -443,6 +444,7 @@ void start_critical_timings(void)
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
+NOKPROBE_SYMBOL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
@@ -452,6 +454,7 @@ void stop_critical_timings(void)
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
+NOKPROBE_SYMBOL(stop_critical_timings);
 
 #ifdef CONFIG_FUNCTION_TRACER
 static bool function_enabled;
@@ -611,6 +614,7 @@ void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 	if (!preempt_trace(pc) && irq_trace())
 		stop_critical_timing(a0, a1, pc);
 }
+NOKPROBE_SYMBOL(tracer_hardirqs_on);
 
 void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
@@ -619,6 +623,7 @@ void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 	if (!preempt_trace(pc) && irq_trace())
 		start_critical_timing(a0, a1, pc);
 }
+NOKPROBE_SYMBOL(tracer_hardirqs_off);
 
 static int irqsoff_tracer_init(struct trace_array *tr)
 {
@@ -9,6 +9,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include "trace.h"
 
 #define CREATE_TRACE_POINTS
@@ -30,6 +31,7 @@ void trace_hardirqs_on(void)
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
+NOKPROBE_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
@@ -43,6 +45,7 @@ void trace_hardirqs_off(void)
 	lockdep_hardirqs_off(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
+NOKPROBE_SYMBOL(trace_hardirqs_off);
 
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
@@ -56,6 +59,7 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
+NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
@@ -69,6 +73,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 	lockdep_hardirqs_off(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
+NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE