Merge tag 'v4.6-rc3' into perf/core, to refresh the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1437,12 +1437,12 @@ static struct trace_event trace_blk_event = {
 static int __init init_blk_tracer(void)
 {
     if (!register_trace_event(&trace_blk_event)) {
-        pr_warning("Warning: could not register block events\n");
+        pr_warn("Warning: could not register block events\n");
         return 1;
     }
 
     if (register_tracer(&blk_tracer) != 0) {
-        pr_warning("Warning: could not register the block tracer\n");
+        pr_warn("Warning: could not register the block tracer\n");
         unregister_trace_event(&trace_blk_event);
         return 1;
     }
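
All of the pr_warning() to pr_warn() churn in this merge is one mechanical substitution: both wrappers emit at KERN_WARNING, and the pr_warning alias was being phased out of the tree. A tiny user-space model of the aliasing, with fprintf standing in for printk (the macro bodies here are illustrative, not the kernel's):

    #include <stdio.h>

    /* Model of the kernel's printk wrappers: both expand to the same
     * call, so replacing pr_warning() with pr_warn() never changes
     * behavior. */
    #define pr_warn(fmt, ...)    fprintf(stderr, "warning: " fmt, ##__VA_ARGS__)
    #define pr_warning(fmt, ...) pr_warn(fmt, ##__VA_ARGS__)

    int main(void)
    {
        pr_warning("old spelling: %d\n", 1); /* what the tree used before */
        pr_warn("new spelling: %d\n", 2);    /* what this merge converts to */
        return 0;
    }

Because the two macros expand identically, every pr_warn hunk in this diff is behavior-preserving; only multi-line message strings get joined in the process.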
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -13,8 +13,6 @@
 #include <linux/ctype.h>
 #include "trace.h"
 
 static DEFINE_PER_CPU(int, bpf_prog_active);
 
 /**
  * trace_call_bpf - invoke BPF program
  * @prog: BPF program
@@ -299,6 +297,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
         return &bpf_perf_event_read_proto;
     case BPF_FUNC_perf_event_output:
         return &bpf_perf_event_output_proto;
+    case BPF_FUNC_get_stackid:
+        return &bpf_get_stackid_proto;
     default:
         return NULL;
     }
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1030,8 +1030,7 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
     for_each_possible_cpu(cpu) {
         stat = &per_cpu(ftrace_profile_stats, cpu);
 
-        /* allocate enough for function name + cpu number */
-        name = kmalloc(32, GFP_KERNEL);
+        name = kasprintf(GFP_KERNEL, "function%d", cpu);
         if (!name) {
             /*
              * The files created are permanent, if something happens
@@ -1043,7 +1042,6 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
             return;
         }
         stat->stat = function_stats;
-        snprintf(name, 32, "function%d", cpu);
         stat->stat.name = name;
         ret = register_stat_tracer(&stat->stat);
         if (ret) {
@@ -1058,8 +1056,7 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
     entry = tracefs_create_file("function_profile_enabled", 0644,
                     d_tracer, NULL, &ftrace_profile_fops);
     if (!entry)
-        pr_warning("Could not create tracefs "
-               "'function_profile_enabled' entry\n");
+        pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
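
The profiler change above collapses a guessed-size kmalloc() plus a later snprintf() into a single kasprintf(), which allocates exactly what the format needs. A stand-alone sketch of the same before/after pattern using the C library, with glibc's asprintf() standing in for the kernel's kasprintf():

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int cpu = 3;
        char *name;

        /* Before: guess a size, allocate, then format into it. */
        name = malloc(32);
        if (!name)
            return 1;
        snprintf(name, 32, "function%d", cpu);
        free(name);

        /* After: one call sizes, allocates, and formats. */
        if (asprintf(&name, "function%d", cpu) < 0)
            return 1;
        printf("%s\n", name);
        free(name);
        return 0;
    }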
@@ -1610,7 +1607,7 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
     return keep_regs;
 }
 
-static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                      int filter_hash,
                      bool inc)
 {
@@ -1618,12 +1615,13 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
     struct ftrace_hash *other_hash;
     struct ftrace_page *pg;
     struct dyn_ftrace *rec;
+    bool update = false;
     int count = 0;
     int all = 0;
 
     /* Only update if the ops has been registered */
     if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
-        return;
+        return false;
 
     /*
      * In the filter_hash case:
@@ -1650,7 +1648,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
          * then there's nothing to do.
          */
         if (ftrace_hash_empty(hash))
-            return;
+            return false;
     }
 
     do_for_each_ftrace_rec(pg, rec) {
@@ -1694,7 +1692,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
         if (inc) {
             rec->flags++;
             if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
-                return;
+                return false;
 
             /*
              * If there's only a single callback registered to a
@@ -1720,7 +1718,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                 rec->flags |= FTRACE_FL_REGS;
         } else {
             if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
-                return;
+                return false;
             rec->flags--;
 
             /*
@@ -1753,22 +1751,28 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
              */
         }
         count++;
+
+        /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
+        update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
+
         /* Shortcut, if we handled all records, we are done. */
         if (!all && count == hash->count)
-            return;
+            return update;
     } while_for_each_ftrace_rec();
+
+    return update;
 }
 
-static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
+static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
                     int filter_hash)
 {
-    __ftrace_hash_rec_update(ops, filter_hash, 0);
+    return __ftrace_hash_rec_update(ops, filter_hash, 0);
 }
 
-static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
+static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
                    int filter_hash)
 {
-    __ftrace_hash_rec_update(ops, filter_hash, 1);
+    return __ftrace_hash_rec_update(ops, filter_hash, 1);
 }
 
 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
@@ -2314,8 +2318,8 @@ unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
     if (rec->flags & FTRACE_FL_TRAMP_EN) {
         ops = ftrace_find_tramp_ops_curr(rec);
         if (FTRACE_WARN_ON(!ops)) {
-            pr_warning("Bad trampoline accounting at: %p (%pS)\n",
-                    (void *)rec->ip, (void *)rec->ip);
+            pr_warn("Bad trampoline accounting at: %p (%pS)\n",
+                (void *)rec->ip, (void *)rec->ip);
             /* Ftrace is shutting down, return anything */
             return (unsigned long)FTRACE_ADDR;
         }
@@ -2644,7 +2648,6 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
         return ret;
 
     ftrace_start_up++;
-    command |= FTRACE_UPDATE_CALLS;
 
     /*
      * Note that ftrace probes uses this to start up
@@ -2665,7 +2668,8 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
         return ret;
     }
 
-    ftrace_hash_rec_enable(ops, 1);
+    if (ftrace_hash_rec_enable(ops, 1))
+        command |= FTRACE_UPDATE_CALLS;
 
     ftrace_startup_enable(command);
 
@@ -2695,12 +2699,12 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
     /* Disabling ipmodify never fails */
     ftrace_hash_ipmodify_disable(ops);
-    ftrace_hash_rec_disable(ops, 1);
+
+    if (ftrace_hash_rec_disable(ops, 1))
+        command |= FTRACE_UPDATE_CALLS;
 
     ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
-    command |= FTRACE_UPDATE_CALLS;
-
     if (saved_ftrace_func != ftrace_trace_function) {
         saved_ftrace_func = ftrace_trace_function;
         command |= FTRACE_UPDATE_TRACE_FUNC;
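
The thread running through the ftrace.c hunks above: __ftrace_hash_rec_update() and its ftrace_hash_rec_enable()/ftrace_hash_rec_disable() wrappers now return bool, true only when ftrace_test_record() says some record really needs rewriting, and ftrace_startup()/ftrace_shutdown() OR in FTRACE_UPDATE_CALLS conditionally instead of unconditionally. A compilable toy of that void-to-bool refactor; the names and the "needs updating" test are simplified stand-ins, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define UPDATE_CALLS 0x1  /* stands in for FTRACE_UPDATE_CALLS */

    /* A record "needs updating" only when its enabled-count crosses
     * zero, roughly what ftrace_test_record() decides in the kernel. */
    static bool hash_rec_update(int *recs, int n, bool inc)
    {
        bool update = false;
        for (int i = 0; i < n; i++) {
            int old = recs[i];
            recs[i] += inc ? 1 : -1;
            /* only the 0 <-> 1 transition changes the call site */
            update |= (old == 0) != (recs[i] == 0);
        }
        return update;
    }

    int main(void)
    {
        int recs[2] = {0, 1}, command = 0;

        if (hash_rec_update(recs, 2, true)) /* like ftrace_hash_rec_enable() */
            command |= UPDATE_CALLS;        /* skipped when nothing changed */
        printf("command=%#x\n", command);
        return 0;
    }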
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -15,4 +15,5 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle);
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -74,11 +74,6 @@ static struct tracer_opt dummy_tracer_opt[] = {
     { }
 };
 
-static struct tracer_flags dummy_tracer_flags = {
-    .val = 0,
-    .opts = dummy_tracer_opt
-};
-
 static int
 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
@@ -1258,12 +1253,22 @@ int __init register_tracer(struct tracer *type)
 
     if (!type->set_flag)
         type->set_flag = &dummy_set_flag;
-    if (!type->flags)
-        type->flags = &dummy_tracer_flags;
-    else
+    if (!type->flags) {
+        /*allocate a dummy tracer_flags*/
+        type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
+        if (!type->flags) {
+            ret = -ENOMEM;
+            goto out;
+        }
+        type->flags->val = 0;
+        type->flags->opts = dummy_tracer_opt;
+    } else
         if (!type->flags->opts)
             type->flags->opts = dummy_tracer_opt;
 
+    /* store the tracer for __set_tracer_option */
+    type->flags->trace = type;
+
     ret = run_tracer_selftest(type);
     if (ret < 0)
         goto out;
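
register_tracer() above stops pointing every flag-less tracer at the shared static dummy_tracer_flags (deleted in the first hunk) and instead kmallocs a private instance, then records a back-pointer in type->flags->trace; the __set_tracer_option() hunk later in this diff reads that back-pointer instead of tr->current_trace. A user-space sketch of the lazy per-object default plus back-pointer, with stripped-down types:

    #include <stdio.h>
    #include <stdlib.h>

    struct tracer;

    struct tracer_flags {
        unsigned int val;
        struct tracer *trace;  /* back-pointer: owner of these flags */
    };

    struct tracer {
        const char *name;
        struct tracer_flags *flags;
    };

    /* Each tracer without flags gets its own zeroed instance, so one
     * tracer toggling an option can no longer affect another. */
    static int register_tracer(struct tracer *t)
    {
        if (!t->flags) {
            t->flags = calloc(1, sizeof(*t->flags));
            if (!t->flags)
                return -1;
        }
        t->flags->trace = t;
        return 0;
    }

    int main(void)
    {
        struct tracer a = { .name = "nop" };
        if (register_tracer(&a))
            return 1;
        printf("%s owns flags at %p\n", a.flags->trace->name, (void *)a.flags);
        free(a.flags);
        return 0;
    }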
@@ -1659,6 +1664,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 #else
         TRACE_FLAG_IRQS_NOSUPPORT |
 #endif
+        ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
         ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
         ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
         (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
@@ -2071,20 +2077,20 @@ void trace_printk_init_buffers(void)
 
     /* trace_printk() is for debug use only. Don't use it in production. */
 
-    pr_warning("\n");
-    pr_warning("**********************************************************\n");
-    pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
-    pr_warning("**                                                      **\n");
-    pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
-    pr_warning("**                                                      **\n");
-    pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
-    pr_warning("** unsafe for production use.                           **\n");
-    pr_warning("**                                                      **\n");
-    pr_warning("** If you see this message and you are not debugging    **\n");
-    pr_warning("** the kernel, report this immediately to your vendor!  **\n");
-    pr_warning("**                                                      **\n");
-    pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
-    pr_warning("**********************************************************\n");
+    pr_warn("\n");
+    pr_warn("**********************************************************\n");
+    pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+    pr_warn("**                                                      **\n");
+    pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
+    pr_warn("**                                                      **\n");
+    pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
+    pr_warn("** unsafe for production use.                           **\n");
+    pr_warn("**                                                      **\n");
+    pr_warn("** If you see this message and you are not debugging    **\n");
+    pr_warn("** the kernel, report this immediately to your vendor!  **\n");
+    pr_warn("**                                                      **\n");
+    pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
+    pr_warn("**********************************************************\n");
 
     /* Expand the buffers to set size */
     tracing_update_buffers();
@@ -3505,7 +3511,7 @@ static int __set_tracer_option(struct trace_array *tr,
                    struct tracer_flags *tracer_flags,
                    struct tracer_opt *opts, int neg)
 {
-    struct tracer *trace = tr->current_trace;
+    struct tracer *trace = tracer_flags->trace;
     int ret;
 
     ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
@@ -4101,7 +4107,7 @@ trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
      */
     map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
     if (!map_array) {
-        pr_warning("Unable to allocate trace enum mapping\n");
+        pr_warn("Unable to allocate trace enum mapping\n");
         return;
     }
@@ -4949,7 +4955,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
     spd.nr_pages = i;
 
-    ret = splice_to_pipe(pipe, &spd);
+    if (i)
+        ret = splice_to_pipe(pipe, &spd);
+    else
+        ret = 0;
 out:
     splice_shrink_spd(&spd);
     return ret;
@@ -6131,7 +6140,7 @@ tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
     snprintf(cpu_dir, 30, "cpu%ld", cpu);
     d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
     if (!d_cpu) {
-        pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
+        pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
         return;
     }
 
@@ -6318,7 +6327,7 @@ struct dentry *trace_create_file(const char *name,
 
     ret = tracefs_create_file(name, mode, parent, data, fops);
     if (!ret)
-        pr_warning("Could not create tracefs '%s' entry\n", name);
+        pr_warn("Could not create tracefs '%s' entry\n", name);
 
     return ret;
 }
@@ -6337,7 +6346,7 @@ static struct dentry *trace_options_init_dentry(struct trace_array *tr)
 
     tr->options = tracefs_create_dir("options", d_tracer);
     if (!tr->options) {
-        pr_warning("Could not create tracefs directory 'options'\n");
+        pr_warn("Could not create tracefs directory 'options'\n");
         return NULL;
     }
 
@@ -6391,11 +6400,8 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
         return;
 
     for (i = 0; i < tr->nr_topts; i++) {
-        /*
-         * Check if these flags have already been added.
-         * Some tracers share flags.
-         */
-        if (tr->topts[i].tracer->flags == tracer->flags)
+        /* Make sure there's no duplicate flags. */
+        if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
             return;
     }
 
@@ -7248,8 +7254,8 @@ __init static int tracer_alloc_buffers(void)
     if (trace_boot_clock) {
         ret = tracing_set_clock(&global_trace, trace_boot_clock);
         if (ret < 0)
-            pr_warning("Trace clock %s not defined, going back to default\n",
-                   trace_boot_clock);
+            pr_warn("Trace clock %s not defined, going back to default\n",
+                trace_boot_clock);
     }
 
     /*
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -125,6 +125,7 @@ enum trace_flag_type {
     TRACE_FLAG_HARDIRQ          = 0x08,
     TRACE_FLAG_SOFTIRQ          = 0x10,
     TRACE_FLAG_PREEMPT_RESCHED  = 0x20,
+    TRACE_FLAG_NMI              = 0x40,
 };
 
 #define TRACE_BUF_SIZE      1024
@@ -345,6 +346,7 @@ struct tracer_opt {
 struct tracer_flags {
     u32                 val;
     struct tracer_opt   *opts;
+    struct tracer       *trace;
 };
 
 /* Makes more easy to define a tracer opt */
@@ -1111,6 +1113,18 @@ struct filter_pred {
     unsigned short      right;
 };
 
+static inline bool is_string_field(struct ftrace_event_field *field)
+{
+    return field->filter_type == FILTER_DYN_STRING ||
+           field->filter_type == FILTER_STATIC_STRING ||
+           field->filter_type == FILTER_PTR_STRING;
+}
+
+static inline bool is_function_field(struct ftrace_event_field *field)
+{
+    return field->filter_type == FILTER_TRACE_FN;
+}
+
 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
 extern void print_event_filter(struct trace_event_file *file,
@@ -1159,9 +1173,24 @@ struct event_trigger_data {
     struct event_filter __rcu   *filter;
     char                        *filter_str;
     void                        *private_data;
+    bool                        paused;
     struct list_head            list;
 };
 
+extern void trigger_data_free(struct event_trigger_data *data);
+extern int event_trigger_init(struct event_trigger_ops *ops,
+                  struct event_trigger_data *data);
+extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
+                          int trigger_enable);
+extern void update_cond_flag(struct trace_event_file *file);
+extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+                   struct event_trigger_data *test,
+                   struct trace_event_file *file);
+extern int set_trigger_filter(char *filter_str,
+                  struct event_trigger_data *trigger_data,
+                  struct trace_event_file *file);
+extern int register_event_command(struct event_command *cmd);
+
 /**
  * struct event_trigger_ops - callbacks for trace event triggers
  *
@@ -1174,7 +1203,8 @@ struct event_trigger_data {
  * @func: The trigger 'probe' function called when the triggering
  *    event occurs.  The data passed into this callback is the data
  *    that was supplied to the event_command @reg() function that
- *    registered the trigger (see struct event_command).
+ *    registered the trigger (see struct event_command) along with
+ *    the trace record, rec.
  *
  * @init: An optional initialization function called for the trigger
  *    when the trigger is registered (via the event_command reg()
@@ -1199,7 +1229,8 @@ struct event_trigger_data {
  * (see trace_event_triggers.c).
  */
 struct event_trigger_ops {
-    void            (*func)(struct event_trigger_data *data);
+    void            (*func)(struct event_trigger_data *data,
+                    void *rec);
     int             (*init)(struct event_trigger_ops *ops,
                     struct event_trigger_data *data);
     void            (*free)(struct event_trigger_ops *ops,
@@ -1243,27 +1274,10 @@ struct event_trigger_ops {
  *    values are defined by adding new values to the trigger_type
  *    enum in include/linux/trace_events.h.
  *
- * @post_trigger: A flag that says whether or not this command needs
- *    to have its action delayed until after the current event has
- *    been closed.  Some triggers need to avoid being invoked while
- *    an event is currently in the process of being logged, since
- *    the trigger may itself log data into the trace buffer.  Thus
- *    we make sure the current event is committed before invoking
- *    those triggers.  To do that, the trigger invocation is split
- *    in two - the first part checks the filter using the current
- *    trace record; if a command has the @post_trigger flag set, it
- *    sets a bit for itself in the return value, otherwise it
- *    directly invokes the trigger.  Once all commands have been
- *    either invoked or set their return flag, the current record is
- *    either committed or discarded.  At that point, if any commands
- *    have deferred their triggers, those commands are finally
- *    invoked following the close of the current event.  In other
- *    words, if the event_trigger_ops @func() probe implementation
- *    itself logs to the trace buffer, this flag should be set,
- *    otherwise it can be left unspecified.
+ * @flags: See the enum event_command_flags below.
  *
- * All the methods below, except for @set_filter(), must be
- * implemented.
+ * All the methods below, except for @set_filter() and @unreg_all(),
+ * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *    registering the trigger written to the 'trigger' file by the
@@ -1288,6 +1302,10 @@ struct event_trigger_ops {
  *    This is usually implemented by the generic utility function
  *    @unregister_trigger() (see trace_event_triggers.c).
  *
+ * @unreg_all: An optional function called to remove all the triggers
+ *    from the list of triggers associated with the event.  Called
+ *    when a trigger file is opened in truncate mode.
+ *
 * @set_filter: An optional function called to parse and set a filter
 *    for the trigger.  If no @set_filter() method is set for the
 *    event command, filters set by the user for the command will be
@@ -1301,7 +1319,7 @@ struct event_command {
     struct list_head        list;
     char                    *name;
     enum event_trigger_type trigger_type;
-    bool                    post_trigger;
+    int                     flags;
     int                     (*func)(struct event_command *cmd_ops,
                     struct trace_event_file *file,
                     char *glob, char *cmd, char *params);
@@ -1313,12 +1331,56 @@ struct event_command {
                     struct event_trigger_ops *ops,
                     struct event_trigger_data *data,
                     struct trace_event_file *file);
+    void            (*unreg_all)(struct trace_event_file *file);
     int             (*set_filter)(char *filter_str,
                           struct event_trigger_data *data,
                           struct trace_event_file *file);
     struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
 };
 
+/**
+ * enum event_command_flags - flags for struct event_command
+ *
+ * @POST_TRIGGER: A flag that says whether or not this command needs
+ *    to have its action delayed until after the current event has
+ *    been closed.  Some triggers need to avoid being invoked while
+ *    an event is currently in the process of being logged, since
+ *    the trigger may itself log data into the trace buffer.  Thus
+ *    we make sure the current event is committed before invoking
+ *    those triggers.  To do that, the trigger invocation is split
+ *    in two - the first part checks the filter using the current
+ *    trace record; if a command has the @post_trigger flag set, it
+ *    sets a bit for itself in the return value, otherwise it
+ *    directly invokes the trigger.  Once all commands have been
+ *    either invoked or set their return flag, the current record is
+ *    either committed or discarded.  At that point, if any commands
+ *    have deferred their triggers, those commands are finally
+ *    invoked following the close of the current event.  In other
+ *    words, if the event_trigger_ops @func() probe implementation
+ *    itself logs to the trace buffer, this flag should be set,
+ *    otherwise it can be left unspecified.
+ *
+ * @NEEDS_REC: A flag that says whether or not this command needs
+ *    access to the trace record in order to perform its function,
+ *    regardless of whether or not it has a filter associated with
+ *    it (filters make a trigger require access to the trace record
+ *    but are not always present).
+ */
+enum event_command_flags {
+    EVENT_CMD_FL_POST_TRIGGER   = 1,
+    EVENT_CMD_FL_NEEDS_REC      = 2,
+};
+
+static inline bool event_command_post_trigger(struct event_command *cmd_ops)
+{
+    return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
+}
+
+static inline bool event_command_needs_rec(struct event_command *cmd_ops)
+{
+    return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
+}
+
 extern int trace_event_enable_disable(struct trace_event_file *file,
                       int enable, int soft_disable);
 extern int tracing_alloc_snapshot(void);
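
struct event_command thus trades its single bool post_trigger for an int flags word, and the two inline helpers above keep call sites from open-coding bit tests. A stand-alone model of the flag word and its accessors, mirroring the trigger_stacktrace_cmd conversion later in this diff:

    #include <stdbool.h>
    #include <stdio.h>

    enum event_command_flags {
        EVENT_CMD_FL_POST_TRIGGER = 1,
        EVENT_CMD_FL_NEEDS_REC    = 2,
    };

    struct event_command {
        const char *name;
        int flags;
    };

    static bool event_command_post_trigger(struct event_command *cmd)
    {
        return cmd->flags & EVENT_CMD_FL_POST_TRIGGER;
    }

    static bool event_command_needs_rec(struct event_command *cmd)
    {
        return cmd->flags & EVENT_CMD_FL_NEEDS_REC;
    }

    int main(void)
    {
        /* Mirrors switching from .post_trigger = true to flags. */
        struct event_command stacktrace = {
            .name  = "stacktrace",
            .flags = EVENT_CMD_FL_POST_TRIGGER,
        };

        printf("%s: post=%d needs_rec=%d\n", stacktrace.name,
               event_command_post_trigger(&stacktrace),
               event_command_needs_rec(&stacktrace));
        return 0;
    }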
@@ -1365,8 +1427,13 @@ int perf_ftrace_event_register(struct trace_event_call *call,
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 void init_ftrace_syscalls(void);
+const char *get_syscall_name(int syscall);
 #else
 static inline void init_ftrace_syscalls(void) { }
+static inline const char *get_syscall_name(int syscall)
+{
+    return NULL;
+}
 #endif
 
 #ifdef CONFIG_EVENT_TRACING
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -961,18 +961,6 @@ int filter_assign_type(const char *type)
     return FILTER_OTHER;
 }
 
-static bool is_function_field(struct ftrace_event_field *field)
-{
-    return field->filter_type == FILTER_TRACE_FN;
-}
-
-static bool is_string_field(struct ftrace_event_field *field)
-{
-    return field->filter_type == FILTER_DYN_STRING ||
-           field->filter_type == FILTER_STATIC_STRING ||
-           field->filter_type == FILTER_PTR_STRING;
-}
-
 static bool is_legal_op(struct ftrace_event_field *field, int op)
 {
     if (is_string_field(field) &&
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -28,8 +28,7 @@
 static LIST_HEAD(trigger_commands);
 static DEFINE_MUTEX(trigger_cmd_mutex);
 
-static void
-trigger_data_free(struct event_trigger_data *data)
+void trigger_data_free(struct event_trigger_data *data)
 {
     if (data->cmd_ops->set_filter)
         data->cmd_ops->set_filter(NULL, data, NULL);
@@ -73,18 +72,20 @@ event_triggers_call(struct trace_event_file *file, void *rec)
         return tt;
 
     list_for_each_entry_rcu(data, &file->triggers, list) {
+        if (data->paused)
+            continue;
         if (!rec) {
-            data->ops->func(data);
+            data->ops->func(data, rec);
             continue;
         }
         filter = rcu_dereference_sched(data->filter);
         if (filter && !filter_match_preds(filter, rec))
             continue;
-        if (data->cmd_ops->post_trigger) {
+        if (event_command_post_trigger(data->cmd_ops)) {
             tt |= data->cmd_ops->trigger_type;
             continue;
         }
-        data->ops->func(data);
+        data->ops->func(data, rec);
     }
     return tt;
 }
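
event_triggers_call() above changes in two ways: triggers whose new data->paused flag is set are skipped entirely, and every probe func() now receives the trace record rec, which the deferred post-call pass (next hunk) forwards as well. A compilable model of that dispatch loop; the trigger type and mask encoding are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    struct trigger {
        bool paused;
        bool post;                      /* defer until record is committed */
        void (*func)(struct trigger *t, void *rec);
    };

    static void dump_rec(struct trigger *t, void *rec)
    {
        (void)t;
        printf("trigger fired, rec=%p\n", rec);
    }

    /* Returns a "call me later" mask like event_triggers_call()'s tt. */
    static int triggers_call(struct trigger *trig, int n, void *rec)
    {
        int tt = 0;
        for (int i = 0; i < n; i++) {
            if (trig[i].paused)
                continue;               /* new: paused triggers do nothing */
            if (trig[i].post) {
                tt |= 1 << i;           /* deferred to the post-call pass */
                continue;
            }
            trig[i].func(&trig[i], rec); /* new: rec is passed through */
        }
        return tt;
    }

    int main(void)
    {
        struct trigger t[2] = {
            { .func = dump_rec },
            { .paused = true, .func = dump_rec },
        };
        int record = 42;
        printf("post mask: %#x\n", triggers_call(t, 2, &record));
        return 0;
    }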
@@ -94,6 +95,7 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
  * event_triggers_post_call - Call 'post_triggers' for a trace event
  * @file: The trace_event_file associated with the event
  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
+ * @rec: The trace entry for the event
  *
  * For each trigger associated with an event, invoke the trigger
  * function registered with the associated trigger command, if the
@@ -104,13 +106,16 @@ EXPORT_SYMBOL_GPL(event_triggers_call);
  */
 void
 event_triggers_post_call(struct trace_event_file *file,
-             enum event_trigger_type tt)
+             enum event_trigger_type tt,
+             void *rec)
 {
     struct event_trigger_data *data;
 
     list_for_each_entry_rcu(data, &file->triggers, list) {
+        if (data->paused)
+            continue;
         if (data->cmd_ops->trigger_type & tt)
-            data->ops->func(data);
+            data->ops->func(data, rec);
     }
 }
 EXPORT_SYMBOL_GPL(event_triggers_post_call);
@@ -188,6 +193,19 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
         return -ENODEV;
     }
 
+    if ((file->f_mode & FMODE_WRITE) &&
+        (file->f_flags & O_TRUNC)) {
+        struct trace_event_file *event_file;
+        struct event_command *p;
+
+        event_file = event_file_data(file);
+
+        list_for_each_entry(p, &trigger_commands, list) {
+            if (p->unreg_all)
+                p->unreg_all(event_file);
+        }
+    }
+
     if (file->f_mode & FMODE_READ) {
         ret = seq_open(file, &event_triggers_seq_ops);
         if (!ret) {
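
The new O_TRUNC branch above gives truncating writes to a trigger file a meaning: each registered event_command may supply the optional unreg_all() hook documented earlier in this diff, and opening the file for write with truncation invokes every such hook, clearing all triggers on the event in one step. A sketch of the hook dispatch with placeholder command names and stripped-down types:

    #include <stdio.h>

    struct event_file { const char *name; };

    struct event_command {
        const char *name;
        /* optional: remove every trigger this command registered */
        void (*unreg_all)(struct event_file *f);
    };

    static void count_unreg_all(struct event_file *f)
    {
        printf("clearing all 'count' triggers on %s\n", f->name);
    }

    int main(void)
    {
        struct event_command cmds[2] = {
            { .name = "traceon" },  /* no hook: silently skipped */
            { .name = "count", .unreg_all = count_unreg_all },
        };
        struct event_file f = { .name = "sched_switch/trigger" };

        /* What the O_TRUNC path does for each registered command. */
        for (int i = 0; i < 2; i++)
            if (cmds[i].unreg_all)
                cmds[i].unreg_all(&f);
        return 0;
    }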
@@ -306,7 +324,7 @@ const struct file_operations event_trigger_fops = {
  * Currently we only register event commands from __init, so mark this
  * __init too.
  */
-static __init int register_event_command(struct event_command *cmd)
+__init int register_event_command(struct event_command *cmd)
 {
     struct event_command *p;
     int ret = 0;
@@ -395,9 +413,8 @@ event_trigger_print(const char *name, struct seq_file *m,
  *
  * Return: 0 on success, errno otherwise
  */
-static int
-event_trigger_init(struct event_trigger_ops *ops,
-           struct event_trigger_data *data)
+int event_trigger_init(struct event_trigger_ops *ops,
+               struct event_trigger_data *data)
 {
     data->ref++;
     return 0;
@@ -425,8 +442,8 @@ event_trigger_free(struct event_trigger_ops *ops,
     trigger_data_free(data);
 }
 
-static int trace_event_trigger_enable_disable(struct trace_event_file *file,
-                          int trigger_enable)
+int trace_event_trigger_enable_disable(struct trace_event_file *file,
+                       int trigger_enable)
 {
     int ret = 0;
 
@@ -483,13 +500,14 @@ clear_event_triggers(struct trace_array *tr)
  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
  * cleared.
  */
-static void update_cond_flag(struct trace_event_file *file)
+void update_cond_flag(struct trace_event_file *file)
 {
     struct event_trigger_data *data;
     bool set_cond = false;
 
     list_for_each_entry_rcu(data, &file->triggers, list) {
-        if (data->filter || data->cmd_ops->post_trigger) {
+        if (data->filter || event_command_post_trigger(data->cmd_ops) ||
+            event_command_needs_rec(data->cmd_ops)) {
             set_cond = true;
             break;
         }
@@ -560,9 +578,9 @@ out:
  * Usually used directly as the @unreg method in event command
  * implementations.
  */
-static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
-                   struct event_trigger_data *test,
-                   struct trace_event_file *file)
+void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+            struct event_trigger_data *test,
+            struct trace_event_file *file)
 {
     struct event_trigger_data *data;
     bool unregistered = false;
@@ -696,9 +714,9 @@ event_trigger_callback(struct event_command *cmd_ops,
  *
  * Return: 0 on success, errno otherwise
  */
-static int set_trigger_filter(char *filter_str,
-                  struct event_trigger_data *trigger_data,
-                  struct trace_event_file *file)
+int set_trigger_filter(char *filter_str,
+               struct event_trigger_data *trigger_data,
+               struct trace_event_file *file)
 {
     struct event_trigger_data *data = trigger_data;
     struct event_filter *filter = NULL, *tmp;
@@ -747,7 +765,7 @@ static int set_trigger_filter(char *filter_str,
 }
 
 static void
-traceon_trigger(struct event_trigger_data *data)
+traceon_trigger(struct event_trigger_data *data, void *rec)
 {
     if (tracing_is_on())
         return;
@@ -756,7 +774,7 @@ traceon_trigger(struct event_trigger_data *data)
 }
 
 static void
-traceon_count_trigger(struct event_trigger_data *data)
+traceon_count_trigger(struct event_trigger_data *data, void *rec)
 {
     if (tracing_is_on())
         return;
@@ -771,7 +789,7 @@ traceon_count_trigger(struct event_trigger_data *data)
 }
 
 static void
-traceoff_trigger(struct event_trigger_data *data)
+traceoff_trigger(struct event_trigger_data *data, void *rec)
 {
     if (!tracing_is_on())
         return;
@@ -780,7 +798,7 @@ traceoff_trigger(struct event_trigger_data *data)
 }
 
 static void
-traceoff_count_trigger(struct event_trigger_data *data)
+traceoff_count_trigger(struct event_trigger_data *data, void *rec)
 {
     if (!tracing_is_on())
         return;
@@ -876,13 +894,13 @@ static struct event_command trigger_traceoff_cmd = {
 
 #ifdef CONFIG_TRACER_SNAPSHOT
 static void
-snapshot_trigger(struct event_trigger_data *data)
+snapshot_trigger(struct event_trigger_data *data, void *rec)
 {
     tracing_snapshot();
 }
 
 static void
-snapshot_count_trigger(struct event_trigger_data *data)
+snapshot_count_trigger(struct event_trigger_data *data, void *rec)
 {
     if (!data->count)
         return;
@@ -890,7 +908,7 @@ snapshot_count_trigger(struct event_trigger_data *data)
     if (data->count != -1)
         (data->count)--;
 
-    snapshot_trigger(data);
+    snapshot_trigger(data, rec);
 }
 
 static int
@@ -969,13 +987,13 @@ static __init int register_trigger_snapshot_cmd(void) { return 0; }
 #define STACK_SKIP 3
 
 static void
-stacktrace_trigger(struct event_trigger_data *data)
+stacktrace_trigger(struct event_trigger_data *data, void *rec)
 {
     trace_dump_stack(STACK_SKIP);
 }
 
 static void
-stacktrace_count_trigger(struct event_trigger_data *data)
+stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
 {
     if (!data->count)
         return;
@@ -983,7 +1001,7 @@ stacktrace_count_trigger(struct event_trigger_data *data)
     if (data->count != -1)
         (data->count)--;
 
-    stacktrace_trigger(data);
+    stacktrace_trigger(data, rec);
 }
 
 static int
@@ -1017,7 +1035,7 @@ stacktrace_get_trigger_ops(char *cmd, char *param)
 static struct event_command trigger_stacktrace_cmd = {
     .name           = "stacktrace",
     .trigger_type   = ETT_STACKTRACE,
-    .post_trigger   = true,
+    .flags          = EVENT_CMD_FL_POST_TRIGGER,
     .func           = event_trigger_callback,
     .reg            = register_trigger,
     .unreg          = unregister_trigger,
@@ -1054,7 +1072,7 @@ struct enable_trigger_data {
 };
 
 static void
-event_enable_trigger(struct event_trigger_data *data)
+event_enable_trigger(struct event_trigger_data *data, void *rec)
 {
     struct enable_trigger_data *enable_data = data->private_data;
 
@@ -1065,7 +1083,7 @@ event_enable_trigger(struct event_trigger_data *data)
 }
 
 static void
-event_enable_count_trigger(struct event_trigger_data *data)
+event_enable_count_trigger(struct event_trigger_data *data, void *rec)
 {
     struct enable_trigger_data *enable_data = data->private_data;
 
@@ -1079,7 +1097,7 @@ event_enable_count_trigger(struct event_trigger_data *data)
     if (data->count != -1)
         (data->count)--;
 
-    event_enable_trigger(data);
+    event_enable_trigger(data, rec);
 }
 
 static int
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -219,6 +219,8 @@ static void tracing_stop_function_trace(struct trace_array *tr)
     unregister_ftrace_function(tr->ops);
 }
 
+static struct tracer function_trace;
+
 static int
 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
@@ -228,6 +230,10 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
         if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
             break;
 
+        /* We can change this flag when not running. */
+        if (tr->current_trace != &function_trace)
+            break;
+
         unregister_ftrace_function(tr->ops);
 
         if (set) {
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -8,6 +8,7 @@
  */
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
 
@@ -1350,7 +1351,7 @@ void graph_trace_open(struct trace_iterator *iter)
 out_err_free:
     kfree(data);
 out_err:
-    pr_warning("function graph tracer: not enough memory\n");
+    pr_warn("function graph tracer: not enough memory\n");
 }
 
 void graph_trace_close(struct trace_iterator *iter)
@@ -1468,12 +1469,12 @@ static __init int init_graph_trace(void)
     max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
 
     if (!register_trace_event(&graph_trace_entry_event)) {
-        pr_warning("Warning: could not register graph trace events\n");
+        pr_warn("Warning: could not register graph trace events\n");
         return 1;
     }
 
     if (!register_trace_event(&graph_trace_ret_event)) {
-        pr_warning("Warning: could not register graph trace events\n");
+        pr_warn("Warning: could not register graph trace events\n");
         return 1;
     }
 
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
         return 0;
 
     local_save_flags(*flags);
-    /* slight chance to get a false positive on tracing_cpu */
-    if (!irqs_disabled_flags(*flags))
+    /*
+     * Slight chance to get a false positive on tracing_cpu,
+     * although I'm starting to think there isn't a chance.
+     * Leave this for now just to be paranoid.
+     */
+    if (!irqs_disabled_flags(*flags) && !preempt_count())
         return 0;
 
     *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -622,7 +626,6 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
     irqsoff_trace = tr;
     /* make sure that the tracer is visible */
     smp_wmb();
-    tracing_reset_online_cpus(&tr->trace_buffer);
 
     ftrace_init_array_ops(tr, irqsoff_tracer_call);
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -459,16 +459,14 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
     if (ret == 0)
         tk->tp.flags |= TP_FLAG_REGISTERED;
     else {
-        pr_warning("Could not insert probe at %s+%lu: %d\n",
-               trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
+        pr_warn("Could not insert probe at %s+%lu: %d\n",
+            trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
         if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
-            pr_warning("This probe might be able to register after"
-                   "target module is loaded. Continue.\n");
+            pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
             ret = 0;
         } else if (ret == -EILSEQ) {
-            pr_warning("Probing address(0x%p) is not an "
-                   "instruction boundary.\n",
-                   tk->rp.kp.addr);
+            pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
+                tk->rp.kp.addr);
             ret = -EINVAL;
         }
     }
@@ -529,7 +527,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
     /* Register new event */
     ret = register_kprobe_event(tk);
     if (ret) {
-        pr_warning("Failed to register probe event(%d)\n", ret);
+        pr_warn("Failed to register probe event(%d)\n", ret);
         goto end;
     }
 
@@ -564,10 +562,9 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
             __unregister_trace_kprobe(tk);
             ret = __register_trace_kprobe(tk);
             if (ret)
-                pr_warning("Failed to re-register probe %s on"
-                       "%s: %d\n",
-                       trace_event_name(&tk->tp.call),
-                       mod->name, ret);
+                pr_warn("Failed to re-register probe %s on %s: %d\n",
+                    trace_event_name(&tk->tp.call),
+                    mod->name, ret);
         }
     }
     mutex_unlock(&probe_lock);
@@ -1336,16 +1333,14 @@ static __init int init_kprobe_trace(void)
 
     /* Event list interface */
     if (!entry)
-        pr_warning("Could not create tracefs "
-               "'kprobe_events' entry\n");
+        pr_warn("Could not create tracefs 'kprobe_events' entry\n");
 
     /* Profile interface */
     entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
                     NULL, &kprobe_profile_ops);
 
     if (!entry)
-        pr_warning("Could not create tracefs "
-               "'kprobe_profile' entry\n");
+        pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
     return 0;
 }
 fs_initcall(init_kprobe_trace);
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -146,7 +146,7 @@ static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
         /* XXX: This is later than where events were lost. */
         trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
         if (!overrun_detected)
-            pr_warning("mmiotrace has lost events.\n");
+            pr_warn("mmiotrace has lost events\n");
         overrun_detected = true;
         goto print_out;
     }

--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -56,7 +56,7 @@ static void nop_trace_reset(struct trace_array *tr)
 }
 
 /* It only serves as a signal handler and a callback to
- * accept or refuse tthe setting of a flag.
+ * accept or refuse the setting of a flag.
  * If you don't implement it, then the flag setting will be
  * automatically accepted.
  */
@@ -75,7 +75,7 @@ static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 
     if (bit == TRACE_NOP_OPT_REFUSE) {
         printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse."
-               "Now cat trace_options to see the result\n",
+               " Now cat trace_options to see the result\n",
                set);
         return -EINVAL;
     }
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -389,7 +389,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
     char irqs_off;
     int hardirq;
     int softirq;
+    int nmi;
 
+    nmi = entry->flags & TRACE_FLAG_NMI;
     hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
     softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 
@@ -415,10 +417,12 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
     }
 
     hardsoft_irq =
+        (nmi && hardirq)     ? 'Z' :
+        nmi                  ? 'z' :
         (hardirq && softirq) ? 'H' :
-        hardirq ? 'h' :
-        softirq ? 's' :
-        '.';
+        hardirq              ? 'h' :
+        softirq              ? 's' :
+        '.' ;
 
     trace_seq_printf(s, "%c%c%c",
              irqs_off, need_resched, hardsoft_irq);
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
     const char *str = *fmt;
     int i;
 
+    if (!*fmt)
+        return 0;
+
     seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
 
     /*

--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -636,8 +636,8 @@ ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer,
             *tmp = '\0';
             size = tmp - kbuf + 1;
         } else if (done + size < count) {
-            pr_warning("Line length is too long: "
-                   "Should be less than %d.", WRITE_BUFSIZE);
+            pr_warn("Line length is too long: Should be less than %d\n",
+                WRITE_BUFSIZE);
             ret = -EINVAL;
             goto out;
         }
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -281,8 +281,7 @@ static int tracing_stat_init(void)
 
     stat_dir = tracefs_create_dir("trace_stat", d_tracing);
     if (!stat_dir)
-        pr_warning("Could not create tracefs "
-               "'trace_stat' entry\n");
+        pr_warn("Could not create tracefs 'trace_stat' entry\n");
     return 0;
 }
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -106,6 +106,17 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr)
     return syscalls_metadata[nr];
 }
 
+const char *get_syscall_name(int syscall)
+{
+    struct syscall_metadata *entry;
+
+    entry = syscall_nr_to_meta(syscall);
+    if (!entry)
+        return NULL;
+
+    return entry->name;
+}
+
 static enum print_line_t
 print_syscall_enter(struct trace_iterator *iter, int flags,
             struct trace_event *event)
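
The new get_syscall_name() above maps a syscall number back to its metadata name, returning NULL when there is no entry; the !CONFIG_FTRACE_SYSCALLS stub added to trace.h earlier in this diff keeps the same NULL contract for callers. A user-space model of the lookup and that contract, with a toy table:

    #include <stdio.h>

    static const char *names[] = { "read", "write", "open" };

    /* Returns NULL for unknown numbers, just as the kernel helper does
     * when syscall_nr_to_meta() finds no metadata. */
    static const char *get_syscall_name(int nr)
    {
        if (nr < 0 || nr >= (int)(sizeof(names) / sizeof(names[0])))
            return NULL;
        return names[nr];
    }

    int main(void)
    {
        printf("%s\n", get_syscall_name(1));          /* "write" */
        printf("%p\n", (void *)get_syscall_name(99)); /* NULL */
        return 0;
    }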
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -334,7 +334,7 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 
     ret = register_uprobe_event(tu);
     if (ret) {
-        pr_warning("Failed to register probe event(%d)\n", ret);
+        pr_warn("Failed to register probe event(%d)\n", ret);
         goto end;
     }
 