Merge tag 'trace-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "New features for this release:

   - Pretty much a full rewrite of the processing of function plugins,
     i.e. echo do_IRQ:stacktrace > set_ftrace_filter

   - The rewrite was needed to allow plugins to be unique to tracing
     instances, i.e. mkdir instance/foo; cd instance/foo;
     echo do_IRQ:stacktrace > set_ftrace_filter
     The old way was written very hackily; the rewrite removes a lot of
     those hacks.

   - New "function-fork" tracing option. When set, processes whose pids
     are listed in the set_ftrace_pid file will have their children's
     pids added to that file when they fork.

   - Exposure of "maxactive" for kretprobes in kprobe_events

   - Allow builtin init functions to be traced by the function tracer
     (via the kernel command line). Module init function tracing will
     come in the next release.

   - Added more selftests, and have selftests also test in an instance"

* tag 'trace-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (60 commits)
  ring-buffer: Return reader page back into existing ring buffer
  selftests: ftrace: Allow some event trigger tests to run in an instance
  selftests: ftrace: Have some basic tests run in a tracing instance too
  selftests: ftrace: Have event tests also run in a tracing instance
  selftests: ftrace: Make func_event_triggers and func_traceonoff_triggers tests do instances
  selftests: ftrace: Allow some tests to be run in a tracing instance
  tracing/ftrace: Allow for instances to trigger their own stacktrace probes
  tracing/ftrace: Allow for the traceonoff probe to be unique to instances
  tracing/ftrace: Enable snapshot function trigger to work with instances
  tracing/ftrace: Allow instances to have their own function probes
  tracing/ftrace: Add a better way to pass data via the probe functions
  ftrace: Dynamically create the probe ftrace_ops for the trace_array
  tracing: Pass the trace_array into ftrace_probe_ops functions
  tracing: Have the trace_array hold the list of registered func probes
  ftrace: If the hash for a probe fails to update then free what was initialized
  ftrace: Have the function probes call their own function
  ftrace: Have each function probe use its own ftrace_ops
  ftrace: Have unregister_ftrace_function_probe_func() return a value
  ftrace: Add helper function ftrace_hash_move_and_update_ops()
  ftrace: Remove data field from ftrace_func_probe structure
  ...
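For readers who want to try the new user-visible pieces, the tracefs side
looks roughly like this. This is a sketch, not text from the pull request
itself: it assumes tracefs is mounted at /sys/kernel/tracing, and
"myretprobe" is a placeholder event name.

    # Run a function plugin inside a tracing instance (new in this release):
    mkdir /sys/kernel/tracing/instances/foo
    echo do_IRQ:stacktrace > /sys/kernel/tracing/instances/foo/set_ftrace_filter

    # "function-fork" option: children of pids listed in set_ftrace_pid
    # are added to the traced set when those processes fork:
    echo $$ > /sys/kernel/tracing/set_ftrace_pid
    echo 1 > /sys/kernel/tracing/options/function-fork

    # kretprobe with an explicit maxactive (the "100" after 'r'):
    echo 'r100:myretprobe do_sys_open $retval' >> /sys/kernel/tracing/kprobe_events

    # Built-in init functions can be traced by enabling the function
    # tracer on the kernel command line, e.g. booting with:
    #   ftrace=function ftrace_filter=*_init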
include/linux/ftrace.h

@@ -42,8 +42,10 @@
 /* Main tracing buffer and events set up */
 #ifdef CONFIG_TRACING
 void trace_init(void);
+void early_trace_init(void);
 #else
 static inline void trace_init(void) { }
+static inline void early_trace_init(void) { }
 #endif
 
 struct module;
@@ -144,6 +146,10 @@ struct ftrace_ops_hash {
 	struct ftrace_hash	*filter_hash;
 	struct mutex		regex_lock;
 };
+
+void ftrace_free_init_mem(void);
+#else
+static inline void ftrace_free_init_mem(void) { }
 #endif
 
 /*
@@ -260,6 +266,7 @@ static inline int ftrace_nr_registered_ops(void)
 }
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
+static inline void ftrace_free_init_mem(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -279,15 +286,45 @@ int
 stack_trace_sysctl(struct ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp,
 		   loff_t *ppos);
-#endif
-
-struct ftrace_func_command {
-	struct list_head	list;
-	char			*name;
-	int			(*func)(struct ftrace_hash *hash,
-					char *func, char *cmd,
-					char *params, int enable);
-};
+
+/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
+DECLARE_PER_CPU(int, disable_stack_tracer);
+
+/**
+ * stack_tracer_disable - temporarily disable the stack tracer
+ *
+ * There are a few locations (namely in RCU) where stack tracing
+ * cannot be executed. This function is used to disable stack
+ * tracing during those critical sections.
+ *
+ * This function must be called with preemption or interrupts
+ * disabled and stack_tracer_enable() must be called shortly after
+ * while preemption or interrupts are still disabled.
+ */
+static inline void stack_tracer_disable(void)
+{
+	/* Preemption or interrupts must be disabled */
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+	this_cpu_inc(disable_stack_tracer);
+}
+
+/**
+ * stack_tracer_enable - re-enable the stack tracer
+ *
+ * After stack_tracer_disable() is called, stack_tracer_enable()
+ * must be called shortly afterward.
+ */
+static inline void stack_tracer_enable(void)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+	this_cpu_dec(disable_stack_tracer);
+}
+#else
+static inline void stack_tracer_disable(void) { }
+static inline void stack_tracer_enable(void) { }
+#endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -315,30 +352,6 @@ void ftrace_bug(int err, struct dyn_ftrace *rec);
 
 struct seq_file;
 
-struct ftrace_probe_ops {
-	void			(*func)(unsigned long ip,
-					unsigned long parent_ip,
-					void **data);
-	int			(*init)(struct ftrace_probe_ops *ops,
-					unsigned long ip, void **data);
-	void			(*free)(struct ftrace_probe_ops *ops,
-					unsigned long ip, void **data);
-	int			(*print)(struct seq_file *m,
-					 unsigned long ip,
-					 struct ftrace_probe_ops *ops,
-					 void *data);
-};
-
-extern int
-register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-			       void *data);
-extern void
-unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
-				 void *data);
-extern void
-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
-extern void unregister_ftrace_function_probe_all(char *glob);
-
 extern int ftrace_text_reserved(const void *start, const void *end);
 
 extern int ftrace_nr_registered_ops(void);
@@ -400,9 +413,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
 void ftrace_free_filter(struct ftrace_ops *ops);
 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
 
-int register_ftrace_command(struct ftrace_func_command *cmd);
-int unregister_ftrace_command(struct ftrace_func_command *cmd);
-
 enum {
 	FTRACE_UPDATE_CALLS		= (1 << 0),
 	FTRACE_DISABLE_CALLS		= (1 << 1),
@@ -433,8 +443,8 @@ enum {
 	FTRACE_ITER_FILTER	= (1 << 0),
 	FTRACE_ITER_NOTRACE	= (1 << 1),
 	FTRACE_ITER_PRINTALL	= (1 << 2),
-	FTRACE_ITER_DO_HASH	= (1 << 3),
-	FTRACE_ITER_HASH	= (1 << 4),
+	FTRACE_ITER_DO_PROBES	= (1 << 3),
+	FTRACE_ITER_PROBE	= (1 << 4),
 	FTRACE_ITER_ENABLED	= (1 << 5),
 };
 
@@ -618,14 +628,6 @@ static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_module_init(struct module *mod) { }
 static inline void ftrace_module_enable(struct module *mod) { }
 static inline void ftrace_release_mod(struct module *mod) { }
-static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
-{
-	return -EINVAL;
-}
-static inline __init int unregister_ftrace_command(char *cmd_name)
-{
-	return -EINVAL;
-}
 static inline int ftrace_text_reserved(const void *start, const void *end)
 {
 	return 0;
include/linux/init.h

@@ -39,7 +39,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold notrace __latent_entropy
+#define __init		__section(.init.text) __cold __inittrace __latent_entropy
 #define __initdata	__section(.init.data)
 #define __initconst	__section(.init.rodata)
 #define __exitdata	__section(.exit.data)

@@ -68,8 +68,10 @@
 
 #ifdef MODULE
 #define __exitused
+#define __inittrace notrace
 #else
 #define __exitused  __used
+#define __inittrace
 #endif
 
 #define __exit          __section(.exit.text) __exitused __cold notrace
include/linux/rcupdate.h

@@ -97,6 +97,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 			       unsigned long secs,
 			       unsigned long c_old,
 			       unsigned long c);
+bool rcu_irq_enter_disabled(void);
 #else
 static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
 					  int *flags,

@@ -113,6 +114,10 @@ static inline void rcutorture_record_test_transition(void)
 static inline void rcutorture_record_progress(unsigned long vernum)
 {
 }
+static inline bool rcu_irq_enter_disabled(void)
+{
+	return false;
+}
 #ifdef CONFIG_RCU_TRACE
 void do_trace_rcu_torture_read(const char *rcutorturename,
 			       struct rcu_head *rhp,
include/linux/ring_buffer.h

@@ -185,7 +185,7 @@ size_t ring_buffer_page_len(void *page);
 
 
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
+void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
 int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
 			  size_t len, int cpu, int full);
 
include/linux/trace_events.h

@@ -138,16 +138,7 @@ enum print_line_t {
 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
 };
 
-/*
- * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
- * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
- * simplifies those functions and keeps them in sync.
- */
-static inline enum print_line_t trace_handle_return(struct trace_seq *s)
-{
-	return trace_seq_has_overflowed(s) ?
-		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
-}
+enum print_line_t trace_handle_return(struct trace_seq *s);
 
 void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
include/linux/tracepoint.h

@@ -128,7 +128,7 @@ extern void syscall_unregfunc(void);
  * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
  * "void *data", whereas the DECLARE_TRACE() will pass in "void *data, proto".
  */
-#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu)		\
+#define __DO_TRACE(tp, proto, args, cond, rcucheck)			\
 	do {								\
 		struct tracepoint_func *it_func_ptr;			\
 		void *it_func;						\

@@ -136,7 +136,11 @@ extern void syscall_unregfunc(void);
 									\
 		if (!(cond))						\
 			return;						\
-		prercu;							\
+		if (rcucheck) {						\
+			if (WARN_ON_ONCE(rcu_irq_enter_disabled()))	\
+				return;					\
+			rcu_irq_enter_irqson();				\
+		}							\
 		rcu_read_lock_sched_notrace();				\
 		it_func_ptr = rcu_dereference_sched((tp)->funcs);	\
 		if (it_func_ptr) {					\

@@ -147,20 +151,19 @@ extern void syscall_unregfunc(void);
 			} while ((++it_func_ptr)->func);		\
 		}							\
 		rcu_read_unlock_sched_notrace();			\
-		postrcu;						\
+		if (rcucheck)						\
+			rcu_irq_exit_irqson();				\
 	} while (0)
 
 #ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)	\
 	static inline void trace_##name##_rcuidle(proto)		\
 	{								\
 		if (static_key_false(&__tracepoint_##name.key))		\
 			__DO_TRACE(&__tracepoint_##name,		\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
-				TP_CONDITION(cond),			\
-				rcu_irq_enter_irqson(),			\
-				rcu_irq_exit_irqson());			\
+				TP_CONDITION(cond), 1);			\
 	}
 #else
 #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)

@@ -186,7 +189,7 @@ extern void syscall_unregfunc(void);
 			__DO_TRACE(&__tracepoint_##name,		\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
-				TP_CONDITION(cond),,);			\
+				TP_CONDITION(cond), 0);			\
 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
 			rcu_read_lock_sched_notrace();			\
 			rcu_dereference_sched(__tracepoint_##name.funcs);\