Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf update from Thomas Gleixner:
 "The perf crowd presents:

  Kernel updates:

   - Removal of jprobes

   - Cleanup and consolidation of the handling of kprobes

   - Cleanup and consolidation of hardware breakpoints

   - The usual pile of fixes and updates to PMUs and event descriptors

  Tooling updates:

   - Updates and improvements all over the place. Nothing outstanding,
     just the (good) boring incremental grump work"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (103 commits)
  perf trace: Do not require --no-syscalls to suppress strace like output
  perf bpf: Include uapi/linux/bpf.h from the 'perf trace' script's bpf.h
  perf tools: Allow overriding MAX_NR_CPUS at compile time
  perf bpf: Show better message when failing to load an object
  perf list: Unify metric group description format with PMU event description
  perf vendor events arm64: Update ThunderX2 implementation defined pmu core events
  perf cs-etm: Generate branch sample for CS_ETM_TRACE_ON packet
  perf cs-etm: Generate branch sample when receiving a CS_ETM_TRACE_ON packet
  perf cs-etm: Support dummy address value for CS_ETM_TRACE_ON packet
  perf cs-etm: Fix start tracing packet handling
  perf build: Fix installation directory for eBPF
  perf c2c report: Fix crash for empty browser
  perf tests: Fix indexing when invoking subtests
  perf trace: Beautify the AF_INET & AF_INET6 'socket' syscall 'protocol' args
  perf trace beauty: Add beautifiers for 'socket''s 'protocol' arg
  perf trace beauty: Do not print NULL strarray entries
  perf beauty: Add a generator for IPPROTO_ socket's protocol constants
  tools include uapi: Grab a copy of linux/in.h
  perf tests: Fix complex event name parsing
  perf evlist: Fix error out while applying initial delay and LBR
  ...
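The headline kernel-side change is the removal of the long-deprecated jprobes API. Its use case, running a handler at function entry, is covered by a plain kprobe with a pre-handler. Below is a minimal sketch of that replacement pattern; the probed symbol and module metadata are illustrative, not taken from this merge:

#include <linux/module.h>
#include <linux/kprobes.h>

/* Illustrative pre-handler: runs before the probed instruction executes.
 * Unlike a jprobe entry handler, it receives pt_regs rather than the
 * probed function's arguments, and needs no jprobe_return(). */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("%s hit at %pS\n", p->symbol_name,
		(void *)instruction_pointer(regs));
	return 0;	/* 0 = let kprobes continue (single-step) as usual */
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",	/* hypothetical probe target */
	.pre_handler	= handler_pre,
};

static int __init probe_init(void)
{
	return register_kprobe(&kp);
}

static void __exit probe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");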
kernel/events/core.c

@@ -1656,7 +1656,7 @@ perf_event_groups_next(struct perf_event *event)
 				typeof(*event), group_node))
 
 /*
- * Add a event from the lists for its context.
+ * Add an event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
@@ -1844,7 +1844,7 @@ static void perf_group_attach(struct perf_event *event)
 }
 
 /*
- * Remove a event from the lists for its context.
+ * Remove an event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
@@ -2148,7 +2148,7 @@ static void __perf_event_disable(struct perf_event *event,
 }
 
 /*
- * Disable a event.
+ * Disable an event.
  *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
@@ -2677,7 +2677,7 @@ static void __perf_event_enable(struct perf_event *event,
 }
 
 /*
- * Enable a event.
+ * Enable an event.
  *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
@@ -2755,7 +2755,7 @@ static int __perf_event_stop(void *info)
 	 * events will refuse to restart because of rb::aux_mmap_count==0,
 	 * see comments in perf_aux_output_begin().
 	 *
-	 * Since this is happening on a event-local CPU, no trace is lost
+	 * Since this is happening on an event-local CPU, no trace is lost
	 * while restarting.
	 */
	if (sd->restart)
@@ -4827,7 +4827,7 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
 	int ret;
 
 	/*
-	 * Return end-of-file for a read on a event that is in
+	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
@@ -5273,11 +5273,11 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(perf_event_update_userpage);
 
-static int perf_mmap_fault(struct vm_fault *vmf)
+static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
 {
 	struct perf_event *event = vmf->vma->vm_file->private_data;
 	struct ring_buffer *rb;
-	int ret = VM_FAULT_SIGBUS;
+	vm_fault_t ret = VM_FAULT_SIGBUS;
 
 	if (vmf->flags & FAULT_FLAG_MKWRITE) {
 		if (vmf->pgoff == 0)
@@ -9904,7 +9904,7 @@ enabled:
 }
 
 /*
- * Allocate and initialize a event structure
+ * Allocate and initialize an event structure
  */
 static struct perf_event *
 perf_event_alloc(struct perf_event_attr *attr, int cpu,
@@ -11235,7 +11235,7 @@ const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 }
 
 /*
- * Inherit a event from parent task to child task.
+ * Inherit an event from parent task to child task.
  *
  * Returns:
  *  - valid pointer on success
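Apart from the a/an comment fixes, the one functional change in the core.c hunks is the switch of perf_mmap_fault() to vm_fault_t, the dedicated bitwise return type for VM_FAULT_* codes. A hedged sketch of that convention for any .fault handler follows; the backing-store lookup helper is hypothetical:

#include <linux/mm.h>

static struct page *find_backing_page(pgoff_t pgoff);	/* hypothetical lookup */

/* Sketch: a .fault handler using the vm_fault_t convention. VM_FAULT_*
 * codes are not errnos; keeping them in vm_fault_t lets static checkers
 * catch accidental mixing with plain int, which is the point of the hunk
 * above. */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	struct page *page = find_backing_page(vmf->pgoff);

	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;	/* hand the page back to the fault core */
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
};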
kernel/events/hw_breakpoint.c

@@ -345,13 +345,13 @@ void release_bp_slot(struct perf_event *bp)
 	mutex_unlock(&nr_bp_mutex);
 }
 
-static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
 {
 	int err;
 
 	__release_bp_slot(bp, old_type);
 
-	err = __reserve_bp_slot(bp, bp->attr.bp_type);
+	err = __reserve_bp_slot(bp, new_type);
 	if (err) {
 		/*
 		 * Reserve the old_type slot back in case
@@ -367,12 +367,12 @@ static int __modify_bp_slot(struct perf_event *bp, u64 old_type)
 	return err;
 }
 
-static int modify_bp_slot(struct perf_event *bp, u64 old_type)
+static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
 {
 	int ret;
 
 	mutex_lock(&nr_bp_mutex);
-	ret = __modify_bp_slot(bp, old_type);
+	ret = __modify_bp_slot(bp, old_type, new_type);
 	mutex_unlock(&nr_bp_mutex);
 	return ret;
 }
@@ -400,16 +400,18 @@ int dbg_release_bp_slot(struct perf_event *bp)
 	return 0;
 }
 
-static int validate_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_parse(struct perf_event *bp,
+			       const struct perf_event_attr *attr,
+			       struct arch_hw_breakpoint *hw)
 {
-	int ret;
+	int err;
 
-	ret = arch_validate_hwbkpt_settings(bp);
-	if (ret)
-		return ret;
+	err = hw_breakpoint_arch_parse(bp, attr, hw);
+	if (err)
+		return err;
 
-	if (arch_check_bp_in_kernelspace(bp)) {
-		if (bp->attr.exclude_kernel)
+	if (arch_check_bp_in_kernelspace(hw)) {
+		if (attr->exclude_kernel)
 			return -EINVAL;
 		/*
 		 * Don't let unprivileged users set a breakpoint in the trap
@@ -424,19 +426,22 @@ static int validate_hw_breakpoint(struct perf_event *bp)
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
-	int ret;
+	struct arch_hw_breakpoint hw;
+	int err;
 
-	ret = reserve_bp_slot(bp);
-	if (ret)
-		return ret;
+	err = reserve_bp_slot(bp);
+	if (err)
+		return err;
 
-	ret = validate_hw_breakpoint(bp);
-
-	/* if arch_validate_hwbkpt_settings() fails then release bp slot */
-	if (ret)
+	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
+	if (err) {
 		release_bp_slot(bp);
+		return err;
+	}
 
-	return ret;
+	bp->hw.info = hw;
+
+	return 0;
 }
 
 /**
@@ -456,35 +461,44 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
+static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
+				    struct perf_event_attr *from)
+{
+	to->bp_addr = from->bp_addr;
+	to->bp_type = from->bp_type;
+	to->bp_len  = from->bp_len;
+	to->disabled = from->disabled;
+}
+
 int
 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
 				bool check)
 {
-	u64 old_addr = bp->attr.bp_addr;
-	u64 old_len  = bp->attr.bp_len;
-	int old_type = bp->attr.bp_type;
-	bool modify  = attr->bp_type != old_type;
-	int err = 0;
+	struct arch_hw_breakpoint hw;
+	int err;
 
-	bp->attr.bp_addr = attr->bp_addr;
-	bp->attr.bp_type = attr->bp_type;
-	bp->attr.bp_len  = attr->bp_len;
-
-	if (check && memcmp(&bp->attr, attr, sizeof(*attr)))
-		return -EINVAL;
-
-	err = validate_hw_breakpoint(bp);
-	if (!err && modify)
-		err = modify_bp_slot(bp, old_type);
-
-	if (err) {
-		bp->attr.bp_addr = old_addr;
-		bp->attr.bp_type = old_type;
-		bp->attr.bp_len  = old_len;
+	err = hw_breakpoint_parse(bp, attr, &hw);
+	if (err)
 		return err;
+
+	if (check) {
+		struct perf_event_attr old_attr;
+
+		old_attr = bp->attr;
+		hw_breakpoint_copy_attr(&old_attr, attr);
+		if (memcmp(&old_attr, attr, sizeof(*attr)))
+			return -EINVAL;
 	}
 
-	bp->attr.disabled = attr->disabled;
+	if (bp->attr.bp_type != attr->bp_type) {
+		err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
+		if (err)
+			return err;
+	}
+
+	hw_breakpoint_copy_attr(&bp->attr, attr);
+	bp->hw.info = hw;
 
 	return 0;
 }
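The rework above replaces validate_hw_breakpoint() with hw_breakpoint_parse(), which decodes the attr into a scratch struct arch_hw_breakpoint and commits it to bp->hw.info only after slot reservation and permission checks have passed, so a failed modification no longer has to restore old attr fields by hand. The consumer-facing API is unchanged; a sketch modeled on samples/hw_breakpoint/data_breakpoint.c, with a hypothetical watched symbol:

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static struct perf_event * __percpu *wp;

static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	pr_info("watched data written, caller: %pS\n",
		(void *)instruction_pointer(regs));
}

static int __init wp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);	/* sane defaults for a breakpoint event */
	attr.bp_addr = kallsyms_lookup_name("jiffies_64");  /* hypothetical target */
	attr.bp_len  = HW_BREAKPOINT_LEN_8;
	attr.bp_type = HW_BREAKPOINT_W;	/* fire on writes */

	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	if (IS_ERR((void __force *)wp))
		return PTR_ERR((void __force *)wp);
	return 0;
}

static void __exit wp_exit(void)
{
	unregister_wide_hw_breakpoint(wp);
}

module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");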
|
@@ -918,7 +918,7 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
|
||||
EXPORT_SYMBOL_GPL(uprobe_register);
|
||||
|
||||
/*
|
||||
* uprobe_apply - unregister a already registered probe.
|
||||
* uprobe_apply - unregister an already registered probe.
|
||||
* @inode: the file in which the probe has to be removed.
|
||||
* @offset: offset from the start of the file.
|
||||
* @uc: consumer which wants to add more or remove some breakpoints
|
||||
@@ -947,7 +947,7 @@ int uprobe_apply(struct inode *inode, loff_t offset,
|
||||
}
|
||||
|
||||
/*
|
||||
* uprobe_unregister - unregister a already registered probe.
|
||||
* uprobe_unregister - unregister an already registered probe.
|
||||
* @inode: the file in which the probe has to be removed.
|
||||
* @offset: offset from the start of the file.
|
||||
* @uc: identify which probe if multiple probes are colocated.
|
||||
@@ -1403,7 +1403,7 @@ static struct return_instance *free_ret_instance(struct return_instance *ri)
|
||||
|
||||
/*
|
||||
* Called with no locks held.
|
||||
* Called in context of a exiting or a exec-ing thread.
|
||||
* Called in context of an exiting or an exec-ing thread.
|
||||
*/
|
||||
void uprobe_free_utask(struct task_struct *t)
|
||||
{
|
||||
|
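For orientation on what these comments describe: a uprobe consumer attaches to an inode plus a byte offset within the mapped file, not to a process, which is why uprobe_apply() and uprobe_unregister() take the same (inode, offset, consumer) triple. A hedged in-kernel consumer sketch; the handler body and offset are illustrative (the real in-tree consumer is the trace_uprobe code):

#include <linux/uprobes.h>

static int my_uprobe_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	/* runs in the context of the task that hit the breakpoint */
	return 0;
}

static struct uprobe_consumer my_consumer = {
	.handler = my_uprobe_handler,
};

/* probe the instruction at byte offset 0x4d0 of the file (hypothetical) */
int attach_example(struct inode *inode)
{
	return uprobe_register(inode, 0x4d0, &my_consumer);
}

void detach_example(struct inode *inode)
{
	uprobe_unregister(inode, 0x4d0, &my_consumer);
}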
kernel/fail_function.c

@@ -184,9 +184,6 @@ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
 	if (should_fail(&fei_fault_attr, 1)) {
 		regs_set_return_value(regs, attr->retval);
 		override_function_with_return(regs);
-		/* Kprobe specific fixup */
-		reset_current_kprobe();
-		preempt_enable_no_resched();
 		return 1;
 	}
 
kernel/kprobes.c (167 lines changed)
@@ -627,8 +627,8 @@ static void optimize_kprobe(struct kprobe *p)
 	    (kprobe_disabled(p) || kprobes_all_disarmed))
 		return;
 
-	/* Both of break_handler and post_handler are not supported. */
-	if (p->break_handler || p->post_handler)
+	/* kprobes with post_handler can not be optimized */
+	if (p->post_handler)
 		return;
 
 	op = container_of(p, struct optimized_kprobe, kp);
@@ -710,9 +710,7 @@ static void reuse_unused_kprobe(struct kprobe *ap)
 	 * there is still a relative jump) and disabled.
 	 */
 	op = container_of(ap, struct optimized_kprobe, kp);
-	if (unlikely(list_empty(&op->list)))
-		printk(KERN_WARNING "Warning: found a stray unused "
-		       "aggrprobe@%p\n", ap->addr);
+	WARN_ON_ONCE(list_empty(&op->list));
 	/* Enable the probe again */
 	ap->flags &= ~KPROBE_FLAG_DISABLED;
 	/* Optimize it again (remove from op->list) */
@@ -985,7 +983,8 @@ static int arm_kprobe_ftrace(struct kprobe *p)
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 				   (unsigned long)p->addr, 0, 0);
 	if (ret) {
-		pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
+			 p->addr, ret);
 		return ret;
 	}
 
@@ -1025,7 +1024,8 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
 
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 				   (unsigned long)p->addr, 1, 0);
-	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
+		  p->addr, ret);
 	return ret;
 }
 #else	/* !CONFIG_KPROBES_ON_FTRACE */
@@ -1116,20 +1116,6 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 }
 NOKPROBE_SYMBOL(aggr_fault_handler);
 
-static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct kprobe *cur = __this_cpu_read(kprobe_instance);
-	int ret = 0;
-
-	if (cur && cur->break_handler) {
-		if (cur->break_handler(cur, regs))
-			ret = 1;
-	}
-	reset_kprobe_instance();
-	return ret;
-}
-NOKPROBE_SYMBOL(aggr_break_handler);
-
 /* Walks the list and increments nmissed count for multiprobe case */
 void kprobes_inc_nmissed_count(struct kprobe *p)
 {
@@ -1270,24 +1256,15 @@ static void cleanup_rp_inst(struct kretprobe *rp)
 }
 NOKPROBE_SYMBOL(cleanup_rp_inst);
 
-/*
- * Add the new probe to ap->list. Fail if this is the
- * second jprobe at the address - two jprobes can't coexist
- */
+/* Add the new probe to ap->list */
 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
 	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
 
-	if (p->break_handler || p->post_handler)
+	if (p->post_handler)
 		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
 
-	if (p->break_handler) {
-		if (ap->break_handler)
-			return -EEXIST;
-		list_add_tail_rcu(&p->list, &ap->list);
-		ap->break_handler = aggr_break_handler;
-	} else
-		list_add_rcu(&p->list, &ap->list);
+	list_add_rcu(&p->list, &ap->list);
 	if (p->post_handler && !ap->post_handler)
 		ap->post_handler = aggr_post_handler;
 
@@ -1310,8 +1287,6 @@ static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	/* We don't care the kprobe which has gone. */
 	if (p->post_handler && !kprobe_gone(p))
 		ap->post_handler = aggr_post_handler;
-	if (p->break_handler && !kprobe_gone(p))
-		ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
 	INIT_HLIST_NODE(&ap->hlist);
@@ -1706,8 +1681,6 @@ static int __unregister_kprobe_top(struct kprobe *p)
 		goto disarmed;
 	else {
 		/* If disabling probe has special handlers, update aggrprobe */
-		if (p->break_handler && !kprobe_gone(p))
-			ap->break_handler = NULL;
 		if (p->post_handler && !kprobe_gone(p)) {
 			list_for_each_entry_rcu(list_p, &ap->list, list) {
 				if ((list_p != p) && (list_p->post_handler))
@@ -1812,77 +1785,6 @@ unsigned long __weak arch_deref_entry_point(void *entry)
 	return (unsigned long)entry;
 }
 
-#if 0
-int register_jprobes(struct jprobe **jps, int num)
-{
-	int ret = 0, i;
-
-	if (num <= 0)
-		return -EINVAL;
-
-	for (i = 0; i < num; i++) {
-		ret = register_jprobe(jps[i]);
-
-		if (ret < 0) {
-			if (i > 0)
-				unregister_jprobes(jps, i);
-			break;
-		}
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(register_jprobes);
-
-int register_jprobe(struct jprobe *jp)
-{
-	unsigned long addr, offset;
-	struct kprobe *kp = &jp->kp;
-
-	/*
-	 * Verify probepoint as well as the jprobe handler are
-	 * valid function entry points.
-	 */
-	addr = arch_deref_entry_point(jp->entry);
-
-	if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
-	    kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
-		kp->pre_handler = setjmp_pre_handler;
-		kp->break_handler = longjmp_break_handler;
-		return register_kprobe(kp);
-	}
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(register_jprobe);
-
-void unregister_jprobe(struct jprobe *jp)
-{
-	unregister_jprobes(&jp, 1);
-}
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-
-void unregister_jprobes(struct jprobe **jps, int num)
-{
-	int i;
-
-	if (num <= 0)
-		return;
-	mutex_lock(&kprobe_mutex);
-	for (i = 0; i < num; i++)
-		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
-			jps[i]->kp.addr = NULL;
-	mutex_unlock(&kprobe_mutex);
-
-	synchronize_sched();
-	for (i = 0; i < num; i++) {
-		if (jps[i]->kp.addr)
-			__unregister_kprobe_bottom(&jps[i]->kp);
-	}
-}
-EXPORT_SYMBOL_GPL(unregister_jprobes);
-#endif
-
 #ifdef CONFIG_KRETPROBES
 /*
  * This kprobe pre_handler is registered with every kretprobe. When probe
@@ -1982,7 +1884,6 @@ int register_kretprobe(struct kretprobe *rp)
 	rp->kp.pre_handler = pre_handler_kretprobe;
 	rp->kp.post_handler = NULL;
 	rp->kp.fault_handler = NULL;
-	rp->kp.break_handler = NULL;
 
 	/* Pre-allocate memory for max kretprobe instances */
 	if (rp->maxactive <= 0) {
@@ -2105,7 +2006,6 @@ static void kill_kprobe(struct kprobe *p)
 	list_for_each_entry_rcu(kp, &p->list, list)
 		kp->flags |= KPROBE_FLAG_GONE;
 	p->post_handler = NULL;
-	p->break_handler = NULL;
 	kill_optimized_kprobe(p);
 }
 /*
@@ -2169,11 +2069,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(enable_kprobe);
 
+/* Caller must NOT call this in usual path. This is only for critical case */
 void dump_kprobe(struct kprobe *kp)
 {
-	printk(KERN_WARNING "Dumping kprobe:\n");
-	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
-	       kp->symbol_name, kp->addr, kp->offset);
+	pr_err("Dumping kprobe:\n");
+	pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
+	       kp->symbol_name, kp->offset, kp->addr);
 }
 NOKPROBE_SYMBOL(dump_kprobe);
 
@@ -2196,11 +2097,8 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 		entry = arch_deref_entry_point((void *)*iter);
 
 		if (!kernel_text_address(entry) ||
-		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
-			pr_err("Failed to find blacklist at %p\n",
-			       (void *)entry);
+		    !kallsyms_lookup_size_offset(entry, &size, &offset))
 			continue;
-		}
 
 		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
 		if (!ent)
@@ -2326,21 +2224,23 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
 		const char *sym, int offset, char *modname, struct kprobe *pp)
 {
 	char *kprobe_type;
+	void *addr = p->addr;
 
 	if (p->pre_handler == pre_handler_kretprobe)
 		kprobe_type = "r";
-	else if (p->pre_handler == setjmp_pre_handler)
-		kprobe_type = "j";
 	else
 		kprobe_type = "k";
 
+	if (!kallsyms_show_value())
+		addr = NULL;
+
 	if (sym)
-		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
-			p->addr, kprobe_type, sym, offset,
+		seq_printf(pi, "%px  %s  %s+0x%x  %s ",
+			addr, kprobe_type, sym, offset,
 			(modname ? modname : " "));
-	else
-		seq_printf(pi, "%p  %s  %p ",
-			p->addr, kprobe_type, p->addr);
+	else	/* try to use %pS */
+		seq_printf(pi, "%px  %s  %pS ",
+			addr, kprobe_type, p->addr);
 
 	if (!pp)
 		pp = p;
@@ -2428,8 +2328,16 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
 	struct kprobe_blacklist_entry *ent =
 		list_entry(v, struct kprobe_blacklist_entry, list);
 
-	seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
-		   (void *)ent->end_addr, (void *)ent->start_addr);
+	/*
+	 * If /proc/kallsyms is not showing kernel address, we won't
+	 * show them here either.
+	 */
+	if (!kallsyms_show_value())
+		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
+			   (void *)ent->start_addr);
+	else
+		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
+			   (void *)ent->end_addr, (void *)ent->start_addr);
 	return 0;
 }
 
@@ -2611,7 +2519,7 @@ static int __init debugfs_kprobe_init(void)
 	if (!dir)
 		return -ENOMEM;
 
-	file = debugfs_create_file("list", 0444, dir, NULL,
+	file = debugfs_create_file("list", 0400, dir, NULL,
 				   &debugfs_kprobes_operations);
 	if (!file)
 		goto error;
@@ -2621,7 +2529,7 @@ static int __init debugfs_kprobe_init(void)
 	if (!file)
 		goto error;
 
-	file = debugfs_create_file("blacklist", 0444, dir, NULL,
+	file = debugfs_create_file("blacklist", 0400, dir, NULL,
 				   &debugfs_kprobe_blacklist_ops);
 	if (!file)
 		goto error;
@@ -2637,6 +2545,3 @@ late_initcall(debugfs_kprobe_init);
 #endif /* CONFIG_DEBUG_FS */
 
 module_init(init_kprobes);
-
-/* defined in arch/.../kernel/kprobes.c */
-EXPORT_SYMBOL_GPL(jprobe_return);
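Several of the kprobes.c hunks above belong to the tree-wide %p hardening rather than to the jprobes removal: plain %p now prints hashed pointer values, so diagnostics switch to %pS (symbol+offset), and raw addresses are emitted with %px only behind a kallsyms_show_value() check. A small sketch of the convention (illustrative helper; uses the 4.18-era kallsyms_show_value() without the cred argument):

#include <linux/kallsyms.h>
#include <linux/seq_file.h>

static void show_probe_addr(struct seq_file *m, void *addr)
{
	/* %pS is always safe: resolves to symbol+offset, leaks no raw address */
	seq_printf(m, "probe at %pS", addr);

	/* raw address only for users already allowed to read /proc/kallsyms */
	if (kallsyms_show_value())
		seq_printf(m, " (0x%px)", addr);
	seq_putc(m, '\n');
}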
kernel/test_kprobes.c

@@ -162,90 +162,6 @@ static int test_kprobes(void)
 
 }
 
-#if 0
-static u32 jph_val;
-
-static u32 j_kprobe_target(u32 value)
-{
-	if (preemptible()) {
-		handler_errors++;
-		pr_err("jprobe-handler is preemptible\n");
-	}
-	if (value != rand1) {
-		handler_errors++;
-		pr_err("incorrect value in jprobe handler\n");
-	}
-
-	jph_val = rand1;
-	jprobe_return();
-	return 0;
-}
-
-static struct jprobe jp = {
-	.entry		= j_kprobe_target,
-	.kp.symbol_name = "kprobe_target"
-};
-
-static int test_jprobe(void)
-{
-	int ret;
-
-	ret = register_jprobe(&jp);
-	if (ret < 0) {
-		pr_err("register_jprobe returned %d\n", ret);
-		return ret;
-	}
-
-	ret = target(rand1);
-	unregister_jprobe(&jp);
-	if (jph_val == 0) {
-		pr_err("jprobe handler not called\n");
-		handler_errors++;
-	}
-
-	return 0;
-}
-
-static struct jprobe jp2 = {
-	.entry		= j_kprobe_target,
-	.kp.symbol_name = "kprobe_target2"
-};
-
-static int test_jprobes(void)
-{
-	int ret;
-	struct jprobe *jps[2] = {&jp, &jp2};
-
-	/* addr and flags should be cleard for reusing kprobe. */
-	jp.kp.addr = NULL;
-	jp.kp.flags = 0;
-	ret = register_jprobes(jps, 2);
-	if (ret < 0) {
-		pr_err("register_jprobes returned %d\n", ret);
-		return ret;
-	}
-
-	jph_val = 0;
-	ret = target(rand1);
-	if (jph_val == 0) {
-		pr_err("jprobe handler not called\n");
-		handler_errors++;
-	}
-
-	jph_val = 0;
-	ret = target2(rand1);
-	if (jph_val == 0) {
-		pr_err("jprobe handler2 not called\n");
-		handler_errors++;
-	}
-	unregister_jprobes(jps, 2);
-
-	return 0;
-}
-#else
-#define test_jprobe() (0)
-#define test_jprobes() (0)
-#endif
 #ifdef CONFIG_KRETPROBES
 static u32 krph_val;
 
@@ -383,16 +299,6 @@ int init_test_probes(void)
 	if (ret < 0)
 		errors++;
 
-	num_tests++;
-	ret = test_jprobe();
-	if (ret < 0)
-		errors++;
-
-	num_tests++;
-	ret = test_jprobes();
-	if (ret < 0)
-		errors++;
-
 #ifdef CONFIG_KRETPROBES
 	num_tests++;
 	ret = test_kretprobe();
kernel/trace/trace_kprobe.c

@@ -1228,16 +1228,11 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 
 		/*
 		 * We need to check and see if we modified the pc of the
-		 * pt_regs, and if so clear the kprobe and return 1 so that we
-		 * don't do the single stepping.
-		 * The ftrace kprobe handler leaves it up to us to re-enable
-		 * preemption here before returning if we've modified the ip.
+		 * pt_regs, and if so return 1 so that we don't do the
+		 * single stepping.
 		 */
-		if (orig_ip != instruction_pointer(regs)) {
-			reset_current_kprobe();
-			preempt_enable_no_resched();
+		if (orig_ip != instruction_pointer(regs))
 			return 1;
-		}
 		if (!ret)
 			return 0;
 	}
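The trace_kprobe.c and fail_function.c deletions are two sides of the same series change: the ftrace-based kprobe handler now clears current_kprobe itself after the pre-handler returns, and preemption was never enabled on that path, so an ip-modifying pre-handler just returns 1. A hedged sketch of the resulting convention (hypothetical stub function; assumes the post-series semantics described above):

#include <linux/kprobes.h>
#include <linux/ptrace.h>

void my_stub(void);	/* hypothetical replacement function */

static int override_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* divert execution to the stub */
	instruction_pointer_set(regs, (unsigned long)my_stub);

	/*
	 * Returning 1 tells kprobes to skip single-stepping; no
	 * reset_current_kprobe()/preempt_enable_no_resched() fixup
	 * is needed here any more.
	 */
	return 1;
}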