Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf update from Thomas Gleixner:
 "The perf crowd presents:

  Kernel updates:

   - Removal of jprobes

   - Cleanup and consolidation of the handling of kprobes

   - Cleanup and consolidation of hardware breakpoints

   - The usual pile of fixes and updates to PMUs and event descriptors

  Tooling updates:

   - Updates and improvements all over the place. Nothing outstanding,
     just the (good) boring incremental grunt work"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (103 commits)
  perf trace: Do not require --no-syscalls to suppress strace like output
  perf bpf: Include uapi/linux/bpf.h from the 'perf trace' script's bpf.h
  perf tools: Allow overriding MAX_NR_CPUS at compile time
  perf bpf: Show better message when failing to load an object
  perf list: Unify metric group description format with PMU event description
  perf vendor events arm64: Update ThunderX2 implementation defined pmu core events
  perf cs-etm: Generate branch sample for CS_ETM_TRACE_ON packet
  perf cs-etm: Generate branch sample when receiving a CS_ETM_TRACE_ON packet
  perf cs-etm: Support dummy address value for CS_ETM_TRACE_ON packet
  perf cs-etm: Fix start tracing packet handling
  perf build: Fix installation directory for eBPF
  perf c2c report: Fix crash for empty browser
  perf tests: Fix indexing when invoking subtests
  perf trace: Beautify the AF_INET & AF_INET6 'socket' syscall 'protocol' args
  perf trace beauty: Add beautifiers for 'socket''s 'protocol' arg
  perf trace beauty: Do not print NULL strarray entries
  perf beauty: Add a generator for IPPROTO_ socket's protocol constants
  tools include uapi: Grab a copy of linux/in.h
  perf tests: Fix complex event name parsing
  perf evlist: Fix error out while applying initial delay and LBR
  ...
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -52,6 +52,7 @@ struct arch_hw_breakpoint {
 #include <asm/reg.h>
 #include <asm/debug.h>
 
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 struct perf_sample_data;
@@ -60,8 +61,10 @@ struct perf_sample_data;
 
 extern int hw_breakpoint_slots(int type);
 extern int arch_bp_generic_fields(int type, int *gen_bp_type);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+				    const struct perf_event_attr *attr,
+				    struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
 					   unsigned long val, void *data);
 int arch_install_hw_breakpoint(struct perf_event *bp);
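The point of the split above is that parsing now writes into a scratch arch_hw_breakpoint instead of mutating the live perf event, and the kernel-address check runs on that scratch copy. A minimal sketch of how a generic caller can chain the two hooks — the wrapper name and the exclude_kernel policy check are illustrative, not taken from this diff:

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

/*
 * Illustrative wrapper (not from this diff): parse the user-visible
 * attr into a scratch arch copy first, then run the kernel-address
 * check on that copy instead of on the live perf_event.
 */
static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	/* Example policy check: a kernel-space address cannot be
	 * combined with exclude_kernel. */
	if (arch_check_bp_in_kernelspace(hw) && attr->exclude_kernel)
		return -EINVAL;

	return 0;
}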
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -88,7 +88,6 @@ struct prev_kprobe {
 struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	unsigned long kprobe_saved_msr;
-	struct pt_regs jprobe_saved_regs;
 	struct prev_kprobe prev_kprobe;
 };
 
@@ -103,17 +102,6 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern int __is_active_jprobe(unsigned long addr);
-extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-			   struct kprobe_ctlblk *kcb);
-#else
-static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-				  struct kprobe_ctlblk *kcb)
-{
-	return 0;
-}
-#endif
 #else
 static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
 static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
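Note the surviving #else branch: the header keeps kprobe_handler()/kprobe_post_handler() callable whether or not CONFIG_KPROBES is set, by supplying inline stubs that return 0 when it is off. A sketch of the call-site pattern this enables — the function name is made up for illustration:

#include <linux/kprobes.h>

/*
 * Sketch: a trap path can call the kprobes hooks unconditionally,
 * with no #ifdef CONFIG_KPROBES at the call site, because the header
 * provides inline stubs when kprobes are disabled.
 */
static int maybe_handle_breakpoint_trap(struct pt_regs *regs)
{
	if (kprobe_handler(regs))	/* inline stub returns 0 when off */
		return 1;		/* trap consumed by a kprobe */

	return 0;			/* fall through to normal handling */
}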
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -119,11 +119,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
 /*
  * Check for virtual address in kernel space.
  */
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
 {
-	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
-	return is_kernel_addr(info->address);
+	return is_kernel_addr(hw->address);
 }
 
 int arch_bp_generic_fields(int type, int *gen_bp_type)
@@ -141,30 +139,31 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+			     const struct perf_event_attr *attr,
+			     struct arch_hw_breakpoint *hw)
 {
 	int ret = -EINVAL, length_max;
-	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
 	if (!bp)
 		return ret;
 
-	info->type = HW_BRK_TYPE_TRANSLATE;
-	if (bp->attr.bp_type & HW_BREAKPOINT_R)
-		info->type |= HW_BRK_TYPE_READ;
-	if (bp->attr.bp_type & HW_BREAKPOINT_W)
-		info->type |= HW_BRK_TYPE_WRITE;
-	if (info->type == HW_BRK_TYPE_TRANSLATE)
+	hw->type = HW_BRK_TYPE_TRANSLATE;
+	if (attr->bp_type & HW_BREAKPOINT_R)
+		hw->type |= HW_BRK_TYPE_READ;
+	if (attr->bp_type & HW_BREAKPOINT_W)
+		hw->type |= HW_BRK_TYPE_WRITE;
+	if (hw->type == HW_BRK_TYPE_TRANSLATE)
 		/* must set alteast read or write */
 		return ret;
-	if (!(bp->attr.exclude_user))
-		info->type |= HW_BRK_TYPE_USER;
-	if (!(bp->attr.exclude_kernel))
-		info->type |= HW_BRK_TYPE_KERNEL;
-	if (!(bp->attr.exclude_hv))
-		info->type |= HW_BRK_TYPE_HYP;
-	info->address = bp->attr.bp_addr;
-	info->len = bp->attr.bp_len;
+	if (!attr->exclude_user)
+		hw->type |= HW_BRK_TYPE_USER;
+	if (!attr->exclude_kernel)
+		hw->type |= HW_BRK_TYPE_KERNEL;
+	if (!attr->exclude_hv)
+		hw->type |= HW_BRK_TYPE_HYP;
+	hw->address = attr->bp_addr;
+	hw->len = attr->bp_len;
 
 	/*
 	 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -178,12 +177,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	if (cpu_has_feature(CPU_FTR_DAWR)) {
 		length_max = 512 ; /* 64 doublewords */
 		/* DAWR region can't cross 512 boundary */
-		if ((bp->attr.bp_addr >> 9) !=
-		    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
+		if ((attr->bp_addr >> 9) !=
+		    ((attr->bp_addr + attr->bp_len - 1) >> 9))
 			return -EINVAL;
 	}
-	if (info->len >
-	    (length_max - (info->address & HW_BREAKPOINT_ALIGN)))
+	if (hw->len >
+	    (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
 		return -EINVAL;
 	return 0;
 }
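The 512-byte DAWR constraint above is just a block-index comparison: shifting an address right by 9 yields the index of its 512-byte block, so a watched range is legal only if its first and last byte land in the same block. A standalone userspace demonstration of that arithmetic, mirroring the kernel test:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the kernel check: addr >> 9 is the index of a 512-byte
 * block; the range is valid only if first and last byte share it. */
static int dawr_crosses_512(uint64_t addr, uint64_t len)
{
	return (addr >> 9) != ((addr + len - 1) >> 9);
}

int main(void)
{
	printf("%d\n", dawr_crosses_512(0x1f8, 8)); /* 0: 0x1f8..0x1ff, one block */
	printf("%d\n", dawr_crosses_512(0x1fc, 8)); /* 1: 0x1fc..0x203, crosses   */
	return 0;
}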
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -25,50 +25,6 @@
 #include <linux/preempt.h>
 #include <linux/ftrace.h>
 
-/*
- * This is called from ftrace code after invoking registered handlers to
- * disambiguate regs->nip changes done by jprobes and livepatch. We check if
- * there is an active jprobe at the provided address (mcount location).
- */
-int __is_active_jprobe(unsigned long addr)
-{
-	if (!preemptible()) {
-		struct kprobe *p = raw_cpu_read(current_kprobe);
-		return (p && (unsigned long)p->addr == addr) ? 1 : 0;
-	}
-
-	return 0;
-}
-
-static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-		      struct kprobe_ctlblk *kcb, unsigned long orig_nip)
-{
-	/*
-	 * Emulate singlestep (and also recover regs->nip)
-	 * as if there is a nop
-	 */
-	regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-	if (unlikely(p->post_handler)) {
-		kcb->kprobe_status = KPROBE_HIT_SSDONE;
-		p->post_handler(p, regs, 0);
-	}
-	__this_cpu_write(current_kprobe, NULL);
-	if (orig_nip)
-		regs->nip = orig_nip;
-	return 1;
-}
-
-int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-		    struct kprobe_ctlblk *kcb)
-{
-	if (kprobe_ftrace(p))
-		return __skip_singlestep(p, regs, kcb, 0);
-	else
-		return 0;
-}
-NOKPROBE_SYMBOL(skip_singlestep);
-
 /* Ftrace callback handler for kprobes */
 void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 			   struct ftrace_ops *ops, struct pt_regs *regs)
@@ -76,18 +32,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
 
-	preempt_disable();
-
 	p = get_kprobe((kprobe_opcode_t *)nip);
 	if (unlikely(!p) || kprobe_disabled(p))
-		goto end;
+		return;
 
 	kcb = get_kprobe_ctlblk();
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(p);
 	} else {
-		unsigned long orig_nip = regs->nip;
-
 		/*
 		 * On powerpc, NIP is *before* this instruction for the
 		 * pre handler
@@ -96,19 +48,23 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 
 		__this_cpu_write(current_kprobe, p);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-		if (!p->pre_handler || !p->pre_handler(p, regs))
-			__skip_singlestep(p, regs, kcb, orig_nip);
-		else {
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
 			/*
-			 * If pre_handler returns !0, it sets regs->nip and
-			 * resets current kprobe. In this case, we should not
-			 * re-enable preemption.
+			 * Emulate singlestep (and also recover regs->nip)
+			 * as if there is a nop
 			 */
-			return;
+			regs->nip += MCOUNT_INSN_SIZE;
+			if (unlikely(p->post_handler)) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				p->post_handler(p, regs, 0);
+			}
 		}
+		/*
+		 * If pre_handler returns !0, it changes regs->nip. We have to
+		 * skip emulating post_handler.
+		 */
+		__this_cpu_write(current_kprobe, NULL);
 	}
-end:
-	preempt_enable_no_resched();
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
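With the jprobe disambiguation gone, kprobe_ftrace_handler() above emulates the single step itself by bumping regs->nip past the mcount site when the pre-handler returns 0. A minimal module exercising this path through an ordinary kprobe — the target symbol is illustrative, and regs->nip is powerpc-specific:

#include <linux/kprobes.h>
#include <linux/module.h>

static int pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre: %s nip=0x%lx\n", p->symbol_name, regs->nip);
	return 0;	/* 0 => let the ftrace handler emulate the step */
}

static void post(struct kprobe *p, struct pt_regs *regs,
		 unsigned long flags)
{
	/* Reached from the emulation path above (KPROBE_HIT_SSDONE). */
	pr_info("post: %s\n", p->symbol_name);
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",	/* illustrative target */
	.pre_handler	= pre,
	.post_handler	= post,
};

static int __init kpf_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kpf_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kpf_init);
module_exit(kpf_exit);
MODULE_LICENSE("GPL");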
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -317,25 +317,17 @@ int kprobe_handler(struct pt_regs *regs)
 			}
 			prepare_singlestep(p, regs);
 			return 1;
-		} else {
-			if (*addr != BREAKPOINT_INSTRUCTION) {
-				/* If trap variant, then it belongs not to us */
-				kprobe_opcode_t cur_insn = *addr;
-				if (is_trap(cur_insn))
-					goto no_kprobe;
-				/* The breakpoint instruction was removed by
-				 * another cpu right after we hit, no further
-				 * handling of this interrupt is appropriate
-				 */
-				ret = 1;
-				goto no_kprobe;
-			}
-			p = __this_cpu_read(current_kprobe);
-			if (p->break_handler && p->break_handler(p, regs)) {
-				if (!skip_singlestep(p, regs, kcb))
-					goto ss_probe;
-				ret = 1;
-			}
+		} else if (*addr != BREAKPOINT_INSTRUCTION) {
+			/* If trap variant, then it belongs not to us */
+			kprobe_opcode_t cur_insn = *addr;
+
+			if (is_trap(cur_insn))
+				goto no_kprobe;
+			/* The breakpoint instruction was removed by
+			 * another cpu right after we hit, no further
+			 * handling of this interrupt is appropriate
+			 */
+			ret = 1;
 		}
 		goto no_kprobe;
 	}
@@ -350,7 +342,7 @@ int kprobe_handler(struct pt_regs *regs)
 		 */
 		kprobe_opcode_t cur_insn = *addr;
 		if (is_trap(cur_insn))
-				goto no_kprobe;
+			goto no_kprobe;
 		/*
 		 * The breakpoint instruction was removed right
 		 * after we hit it. Another cpu has removed
@@ -366,11 +358,13 @@ int kprobe_handler(struct pt_regs *regs)
 
 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 	set_current_kprobe(p, regs, kcb);
-	if (p->pre_handler && p->pre_handler(p, regs))
-		/* handler has already set things up, so skip ss setup */
+	if (p->pre_handler && p->pre_handler(p, regs)) {
+		/* handler changed execution path, so skip ss setup */
+		reset_current_kprobe();
+		preempt_enable_no_resched();
 		return 1;
+	}
 
 ss_probe:
 	if (p->ainsn.boostable >= 0) {
 		ret = try_to_emulate(p, regs);
 
@@ -611,60 +605,6 @@ unsigned long arch_deref_entry_point(void *entry)
 }
 NOKPROBE_SYMBOL(arch_deref_entry_point);
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct jprobe *jp = container_of(p, struct jprobe, kp);
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-
-	/* setup return addr to the jprobe handler routine */
-	regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef PPC64_ELF_ABI_v2
-	regs->gpr[12] = (unsigned long)jp->entry;
-#elif defined(PPC64_ELF_ABI_v1)
-	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#endif
-
-	/*
-	 * jprobes use jprobe_return() which skips the normal return
-	 * path of the function, and this messes up the accounting of the
-	 * function graph tracer.
-	 *
-	 * Pause function graph tracing while performing the jprobe function.
-	 */
-	pause_graph_tracing();
-
-	return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-
-void __used jprobe_return(void)
-{
-	asm volatile("jprobe_return_trap:\n"
-		     "trap\n"
-		     ::: "memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) {
-		pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n",
-			 regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap"));
-		return 0;
-	}
-
-	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-	/* It's OK to start function graph tracing again */
-	unpause_graph_tracing();
-	preempt_enable_no_resched();
-	return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
-
 static struct kprobe trampoline_p = {
 	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
 	.pre_handler = trampoline_probe_handler
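Users of the removed jprobes API got the probed function's arguments handed to their handler directly. The replacement is a plain kprobe whose pre_handler digs the arguments out of pt_regs; a sketch for ppc64, where the first integer argument arrives in GPR3 — the probed symbol is illustrative:

#include <linux/kprobes.h>
#include <linux/module.h>

static int pre(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_PPC64
	/* First integer argument per the ppc64 ELF ABI lives in GPR3. */
	pr_info("%s: arg0=0x%lx\n", p->symbol_name, regs->gpr[3]);
#endif
	return 0;	/* unlike a jprobe, no jprobe_return() is needed */
}

static struct kprobe kp = {
	.symbol_name	= "_do_fork",	/* illustrative probe target */
	.pre_handler	= pre,
};

static int __init jmig_init(void)
{
	return register_kprobe(&kp);
}

static void __exit jmig_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(jmig_init);
module_exit(jmig_exit);
MODULE_LICENSE("GPL");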
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -104,39 +104,13 @@ ftrace_regs_call:
 	bl	ftrace_stub
 	nop
 
-	/* Load the possibly modified NIP */
-	ld	r15, _NIP(r1)
-
+	/* Load ctr with the possibly modified NIP */
+	ld	r3, _NIP(r1)
+	mtctr	r3
 #ifdef CONFIG_LIVEPATCH
-	cmpd	r14, r15	/* has NIP been altered? */
+	cmpd	r14, r3		/* has NIP been altered? */
 #endif
 
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
-	/* NIP has not been altered, skip over further checks */
-	beq	1f
-
-	/* Check if there is an active jprobe on us */
-	subi	r3, r14, 4
-	bl	__is_active_jprobe
-	nop
-
-	/*
-	 * If r3 == 1, then this is a kprobe/jprobe.
-	 * else, this is livepatched function.
-	 *
-	 * The conditional branch for livepatch_handler below will use the
-	 * result of this comparison. For kprobe/jprobe, we just need to branch to
-	 * the new NIP, not call livepatch_handler. The branch below is bne, so we
-	 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
-	 * CR0[EQ] = (r3 == 1).
-	 */
-	cmpdi	r3, 1
-1:
-#endif
-
-	/* Load CTR with the possibly modified NIP */
-	mtctr	r15
-
 	/* Restore gprs */
 	REST_GPR(0,r1)
 	REST_10GPRS(2,r1)
@@ -154,10 +128,7 @@ ftrace_regs_call:
 	addi	r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-	/*
-	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
-	 * not on a kprobe/jprobe, then handle livepatch.
-	 */
+	/* Based on the cmpd above, if the NIP was altered handle livepatch */
 	bne-	livepatch_handler
 #endif
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1469,7 +1469,7 @@ static int collect_events(struct perf_event *group, int max_count,
 }
 
 /*
- * Add a event to the PMU.
+ * Add an event to the PMU.
  * If all events are not already frozen, then we disable and
  * re-enable the PMU in order to get hw_perf_enable to do the
  * actual work of reconfiguring the PMU.
@@ -1548,7 +1548,7 @@ nocheck:
 }
 
 /*
- * Remove a event from the PMU.
+ * Remove an event from the PMU.
  */
 static void power_pmu_del(struct perf_event *event, int ef_flags)
 {
@@ -1742,7 +1742,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
 /*
  * Return 1 if we might be able to put event on a limited PMC,
  * or 0 if not.
- * A event can only go on a limited PMC if it counts something
+ * An event can only go on a limited PMC if it counts something
  * that a limited PMC can count, doesn't require interrupts, and
  * doesn't exclude any processor mode.
  */