Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1255,19 +1255,29 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
 /*
  * If we have a symbol_name argument, look it up and add the offset field
  * to it. This way, we can specify a relative address to a symbol.
+ * This returns encoded errors if it fails to look up symbol or invalid
+ * combination of parameters.
  */
 static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
 {
 	kprobe_opcode_t *addr = p->addr;
+
+	if ((p->symbol_name && p->addr) ||
+	    (!p->symbol_name && !p->addr))
+		goto invalid;
+
 	if (p->symbol_name) {
-		if (addr)
-			return NULL;
 		kprobe_lookup_name(p->symbol_name, addr);
+		if (!addr)
+			return ERR_PTR(-ENOENT);
 	}
 
-	if (!addr)
-		return NULL;
-	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
+	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
+	if (addr)
+		return addr;
+
+invalid:
+	return ERR_PTR(-EINVAL);
 }
 
 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
@@ -1311,8 +1321,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 	kprobe_opcode_t *addr;
 
 	addr = kprobe_addr(p);
-	if (!addr)
-		return -EINVAL;
+	if (IS_ERR(addr))
+		return PTR_ERR(addr);
 	p->addr = addr;
 
 	ret = check_kprobe_rereg(p);
@@ -1335,6 +1345,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 	 */
 	probed_mod = __module_text_address((unsigned long) p->addr);
 	if (probed_mod) {
+		/* Return -ENOENT if fail. */
+		ret = -ENOENT;
 		/*
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
@@ -1351,6 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 			module_put(probed_mod);
 			goto fail_with_jump_label;
 		}
+		/* ret will be updated by following code */
 	}
 	preempt_enable();
 	jump_label_unlock();
@@ -1399,7 +1412,7 @@ out:
 fail_with_jump_label:
 	preempt_enable();
 	jump_label_unlock();
-	return -EINVAL;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 
@@ -1686,8 +1699,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
 	if (kretprobe_blacklist_size) {
 		addr = kprobe_addr(&rp->kp);
-		if (!addr)
-			return -EINVAL;
+		if (IS_ERR(addr))
+			return PTR_ERR(addr);
 
 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
 			if (kretprobe_blacklist[i].addr == addr)
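
The kprobe_addr() rework above stops signalling failure with a bare NULL and instead encodes the errno in the returned pointer, which register_kprobe() and register_kretprobe() then unpack with IS_ERR()/PTR_ERR(). A minimal userspace sketch of that convention follows; it re-creates the idea of the kernel's ERR_PTR helpers purely for illustration, and the lookup_symbol() helper with its hard-coded ENOENT value (2) is invented for the example.

#include <stdio.h>

#define MAX_ERRNO 4095
/* An "error pointer" lives in the top MAX_ERRNO values of the address space. */
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

static void *lookup_symbol(int found)
{
	static int some_object;
	return found ? (void *)&some_object : ERR_PTR(-2 /* ENOENT */);
}

int main(void)
{
	void *addr = lookup_symbol(0);

	if (IS_ERR(addr))	/* one return value carries an address or an errno */
		printf("lookup failed: %ld\n", PTR_ERR(addr));
	return 0;
}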
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -88,6 +88,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
@@ -146,9 +147,11 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	__ftrace_trace_function_delay = ftrace_stub;
 	ftrace_pid_function = ftrace_stub;
 }
 
+#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -207,8 +210,13 @@ static void update_ftrace_function(void)
 
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
 #else
+#ifdef CONFIG_DYNAMIC_FTRACE
+	/* do not update till all functions have been modified */
+	__ftrace_trace_function_delay = func;
+#else
 	__ftrace_trace_function = func;
+#endif
 	ftrace_trace_function = ftrace_test_stop_func;
 #endif
 }
@@ -1170,8 +1178,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 	return NULL;
 }
 
+static void
+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+static void
+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+
 static int
-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+		 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
 	struct hlist_node *tp, *tn;
@@ -1181,8 +1195,15 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	unsigned long key;
 	int size = src->count;
 	int bits = 0;
+	int ret;
 	int i;
 
+	/*
+	 * Remove the current set, update the hash and add
+	 * them back.
+	 */
+	ftrace_hash_rec_disable(ops, enable);
+
 	/*
 	 * If the new source is empty, just free dst and assign it
 	 * the empty_hash.
@@ -1203,9 +1224,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	if (bits > FTRACE_HASH_MAX_BITS)
 		bits = FTRACE_HASH_MAX_BITS;
 
+	ret = -ENOMEM;
 	new_hash = alloc_ftrace_hash(bits);
 	if (!new_hash)
-		return -ENOMEM;
+		goto out;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1224,7 +1246,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
 	rcu_assign_pointer(*dst, new_hash);
 	free_ftrace_hash_rcu(old_hash);
 
-	return 0;
+	ret = 0;
+ out:
+	/*
+	 * Enable regardless of ret:
+	 *  On success, we enable the new hash.
+	 *  On failure, we re-enable the original hash.
+	 */
+	ftrace_hash_rec_enable(ops, enable);
+
+	return ret;
 }
 
 /*
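
The two hunks above make ftrace_hash_move() itself responsible for balancing the record accounting: the records are dropped before the hash is swapped and re-enabled afterwards whether or not the swap succeeded. A standalone sketch of that balance pattern, with plain counters standing in for the kernel's hash bookkeeping:

#include <stdio.h>

static int rec_count;

static void rec_disable(void) { rec_count--; }	/* remove the current set */
static void rec_enable(void)  { rec_count++; }	/* add the records back */

static int swap_hash(int fail) { return fail ? -1 : 0; }

static int hash_move(int fail)
{
	int ret;

	rec_disable();		/* always drop before the swap */
	ret = swap_hash(fail);	/* may fail (-ENOMEM in the real code) */
	rec_enable();		/* re-enable regardless of ret */
	return ret;
}

int main(void)
{
	hash_move(0);
	hash_move(1);
	printf("count still balanced: %d\n", rec_count);	/* prints 0 */
	return 0;
}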
@@ -1584,6 +1615,12 @@ static int __ftrace_modify_code(void *data)
 {
 	int *command = data;
 
+	/*
+	 * Do not call function tracer while we update the code.
+	 * We are in stop machine, no worrying about races.
+	 */
+	function_trace_stop++;
+
 	if (*command & FTRACE_ENABLE_CALLS)
 		ftrace_replace_code(1);
 	else if (*command & FTRACE_DISABLE_CALLS)
@@ -1597,6 +1634,18 @@ static int __ftrace_modify_code(void *data)
 	else if (*command & FTRACE_STOP_FUNC_RET)
 		ftrace_disable_ftrace_graph_caller();
 
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	/*
+	 * For archs that call ftrace_test_stop_func(), we must
+	 * wait till after we update all the function callers
+	 * before we update the callback. This keeps different
+	 * ops that record different functions from corrupting
+	 * each other.
+	 */
+	__ftrace_trace_function = __ftrace_trace_function_delay;
+#endif
+	function_trace_stop--;
+
 	return 0;
 }
 
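
On archs without CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST, the callback switch is staged through __ftrace_trace_function_delay and only published after every call site has been modified under stop machine. A hypothetical userspace reduction of that two-step publish (all names below are made up for the sketch):

#include <stdio.h>

static void old_cb(void) { puts("old"); }
static void new_cb(void) { puts("new"); }

static void (*live_cb)(void) = old_cb;		/* what callers invoke */
static void (*delayed_cb)(void) = old_cb;	/* staged, not yet live */

static void update_callsites(void) { /* stand-in for ftrace_replace_code() */ }

int main(void)
{
	delayed_cb = new_cb;	/* stage the new callback */
	update_callsites();	/* patch all call sites first */
	live_cb = delayed_cb;	/* only now make it live */
	live_cb();
	return 0;
}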
@@ -2865,7 +2914,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 	ftrace_match_records(hash, buf, len);
 
 	mutex_lock(&ftrace_lock);
-	ret = ftrace_hash_move(orig_hash, hash);
+	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+	    && ftrace_enabled)
+		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 	mutex_unlock(&ftrace_lock);
 
 	mutex_unlock(&ftrace_regex_lock);
@@ -3048,18 +3101,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
 			orig_hash = &iter->ops->notrace_hash;
 
 		mutex_lock(&ftrace_lock);
-		/*
-		 * Remove the current set, update the hash and add
-		 * them back.
-		 */
-		ftrace_hash_rec_disable(iter->ops, filter_hash);
-		ret = ftrace_hash_move(orig_hash, iter->hash);
-		if (!ret) {
-			ftrace_hash_rec_enable(iter->ops, filter_hash);
-			if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
-			    && ftrace_enabled)
-				ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-		}
+		ret = ftrace_hash_move(iter->ops, filter_hash,
+				       orig_hash, iter->hash);
+		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+		    && ftrace_enabled)
+			ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
 		mutex_unlock(&ftrace_lock);
 	}
 	free_ftrace_hash(iter->hash);
@@ -3338,7 +3385,7 @@ static int ftrace_process_locs(struct module *mod,
 {
 	unsigned long *p;
 	unsigned long addr;
-	unsigned long flags;
+	unsigned long flags = 0; /* Shut up gcc */
 
 	mutex_lock(&ftrace_lock);
 	p = start;
@@ -3356,12 +3403,18 @@ static int ftrace_process_locs(struct module *mod,
 	}
 
 	/*
-	 * Disable interrupts to prevent interrupts from executing
-	 * code that is being modified.
+	 * We only need to disable interrupts on start up
+	 * because we are modifying code that an interrupt
+	 * may execute, and the modification is not atomic.
+	 * But for modules, nothing runs the code we modify
+	 * until we are finished with it, and there's no
+	 * reason to cause large interrupt latencies while we do it.
 	 */
-	local_irq_save(flags);
+	if (!mod)
+		local_irq_save(flags);
 	ftrace_update_code(mod);
-	local_irq_restore(flags);
+	if (!mod)
+		local_irq_restore(flags);
 	mutex_unlock(&ftrace_lock);
 
 	return 0;
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1248,6 +1248,15 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 #ifdef CONFIG_STACKTRACE
+
+#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+struct ftrace_stack {
+	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
+};
+
+static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+static DEFINE_PER_CPU(int, ftrace_stack_reserve);
+
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
 				 unsigned long flags,
 				 int skip, int pc, struct pt_regs *regs)
@@ -1256,25 +1265,77 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
-					  sizeof(*entry), flags, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	memset(&entry->caller, 0, sizeof(entry->caller));
+	int use_stack;
+	int size = FTRACE_STACK_ENTRIES;
 
 	trace.nr_entries	= 0;
-	trace.max_entries	= FTRACE_STACK_ENTRIES;
 	trace.skip		= skip;
-	trace.entries		= entry->caller;
 
-	if (regs)
-		save_stack_trace_regs(regs, &trace);
-	else
-		save_stack_trace(&trace);
+	/*
+	 * Since events can happen in NMIs there's no safe way to
+	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
+	 * or NMI comes in, it will just have to use the default
+	 * FTRACE_STACK_SIZE.
+	 */
+	preempt_disable_notrace();
+
+	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+	/*
+	 * We don't need any atomic variables, just a barrier.
+	 * If an interrupt comes in, we don't care, because it would
+	 * have exited and put the counter back to what we want.
+	 * We just need a barrier to keep gcc from moving things
+	 * around.
+	 */
+	barrier();
+	if (use_stack == 1) {
+		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
+		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
+
+		if (regs)
+			save_stack_trace_regs(regs, &trace);
+		else
+			save_stack_trace(&trace);
+
+		if (trace.nr_entries > size)
+			size = trace.nr_entries;
+	} else
+		/* From now on, use_stack is a boolean */
+		use_stack = 0;
+
+	size *= sizeof(unsigned long);
+
+	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
+					  sizeof(*entry) + size, flags, pc);
+	if (!event)
+		goto out;
+	entry = ring_buffer_event_data(event);
+
+	memset(&entry->caller, 0, size);
+
+	if (use_stack)
+		memcpy(&entry->caller, trace.entries,
+		       trace.nr_entries * sizeof(unsigned long));
+	else {
+		trace.max_entries	= FTRACE_STACK_ENTRIES;
+		trace.entries		= entry->caller;
+		if (regs)
+			save_stack_trace_regs(regs, &trace);
+		else
+			save_stack_trace(&trace);
+	}
+
+	entry->size = trace.nr_entries;
 
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
+ out:
+	/* Again, don't let gcc optimize things here */
+	barrier();
+	__get_cpu_var(ftrace_stack_reserve)--;
+	preempt_enable_notrace();
+
 }
 
 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
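
The reworked __ftrace_trace_stack() reserves a per-CPU scratch stack with a plain counter plus a compiler barrier; a nested user (interrupt or NMI) sees a count above one and falls back to a small default buffer. A rough single-threaded analog using GCC-style builtins, with __thread standing in for per-CPU data, purely as an illustration:

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")	/* compiler barrier */

static __thread int stack_reserve;
static __thread unsigned long big_scratch[1024];

static void record_stack(void)
{
	int use_big = ++stack_reserve;	/* reserve the shared scratch buffer */
	unsigned long small[8];
	unsigned long *buf = small;
	int n = 8;

	barrier();			/* keep the compiler from reordering */
	if (use_big == 1) {		/* first user in this context wins */
		buf = big_scratch;
		n = 1024;
	}
	printf("using %s buffer of %d entries\n",
	       buf == big_scratch ? "big" : "small", n);
	barrier();
	stack_reserve--;		/* release the reservation */
}

int main(void) { record_stack(); return 0; }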
@@ -1562,7 +1623,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 
 	ftrace_enable_cpu();
 
-	return event ? ring_buffer_event_data(event) : NULL;
+	if (event) {
+		iter->ent_size = ring_buffer_event_length(event);
+		return ring_buffer_event_data(event);
+	}
+	iter->ent_size = 0;
+	return NULL;
 }
 
 static struct trace_entry *
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -278,6 +278,29 @@ struct tracer {
 };
 
 
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT		(1<<11)
+#define TRACE_GLOBAL_BIT		(1<<12)
+/*
+ * Abuse of the trace_recursion.
+ * As we need a way to maintain state if we are tracing the function
+ * graph in irq because we want to trace a particular function that
+ * was called in irq context but we have irq tracing off. Since this
+ * can only be modified by current, we can reuse trace_recursion.
+ */
+#define TRACE_IRQ_BIT			(1<<13)
+
+#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
+
 #define TRACE_PIPE_ALL_CPU	-1
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
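
The trace_recursion_* helpers above pack per-task state into single bits of one word. A tiny self-contained demo of the same set/clear/test pattern, with a plain global replacing current->trace_recursion:

#include <stdio.h>

static unsigned long trace_recursion;	/* stands in for current->trace_recursion */

#define TRACE_IRQ_BIT			(1<<13)
#define trace_recursion_set(bit)	do { trace_recursion |= (bit); } while (0)
#define trace_recursion_clear(bit)	do { trace_recursion &= ~(bit); } while (0)
#define trace_recursion_test(bit)	(trace_recursion & (bit))

int main(void)
{
	trace_recursion_set(TRACE_IRQ_BIT);
	printf("irq bit: %s\n", trace_recursion_test(TRACE_IRQ_BIT) ? "set" : "clear");
	trace_recursion_clear(TRACE_IRQ_BIT);
	printf("irq bit: %s\n", trace_recursion_test(TRACE_IRQ_BIT) ? "set" : "clear");
	return 0;
}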
@@ -516,8 +539,18 @@ static inline int ftrace_graph_addr(unsigned long addr)
 		return 1;
 
 	for (i = 0; i < ftrace_graph_count; i++) {
-		if (addr == ftrace_graph_funcs[i])
+		if (addr == ftrace_graph_funcs[i]) {
+			/*
+			 * If no irqs are to be traced, but a set_graph_function
+			 * is set, and called by an interrupt handler, we still
+			 * want to trace it.
+			 */
+			if (in_irq())
+				trace_recursion_set(TRACE_IRQ_BIT);
+			else
+				trace_recursion_clear(TRACE_IRQ_BIT);
 			return 1;
+		}
 	}
 
 	return 0;
@@ -795,19 +828,4 @@ extern const char *__stop___trace_bprintk_fmt[];
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #include "trace_entries.h"
 
-/* Only current can touch trace_recursion */
-#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
-#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
-
-/* Ring buffer has the 10 LSB bits to count */
-#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
-
-/* for function tracing recursion */
-#define TRACE_INTERNAL_BIT		(1<<11)
-#define TRACE_GLOBAL_BIT		(1<<12)
-
-#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
-#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
-#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
-
 #endif /* _LINUX_KERNEL_TRACE_H */
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -161,7 +161,8 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
 	TRACE_STACK,
 
 	F_STRUCT(
-		__array(	unsigned long,	caller, FTRACE_STACK_ENTRIES	)
+		__field(	int,		size	)
+		__dynamic_array(unsigned long,	caller	)
 	),
 
 	F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -227,7 +227,7 @@ int __trace_graph_entry(struct trace_array *tr,
 
 static inline int ftrace_graph_ignore_irqs(void)
 {
-	if (!ftrace_graph_skip_irqs)
+	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 		return 0;
 
 	return in_irq();
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -343,6 +343,14 @@ DEFINE_BASIC_FETCH_FUNCS(deref)
 DEFINE_FETCH_deref(string)
 DEFINE_FETCH_deref(string_size)
 
+static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
+{
+	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+		update_deref_fetch_param(data->orig.data);
+	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+		update_symbol_cache(data->orig.data);
+}
+
 static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
 {
 	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
@@ -376,6 +384,19 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield)
 #define fetch_bitfield_string NULL
 #define fetch_bitfield_string_size NULL
 
+static __kprobes void
+update_bitfield_fetch_param(struct bitfield_fetch_param *data)
+{
+	/*
+	 * Don't check the bitfield itself, because this must be the
+	 * last fetch function.
+	 */
+	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+		update_deref_fetch_param(data->orig.data);
+	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+		update_symbol_cache(data->orig.data);
+}
+
 static __kprobes void
 free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 {
@@ -389,6 +410,7 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 		free_symbol_cache(data->orig.data);
 	kfree(data);
 }
+
 /* Default (unsigned long) fetch type */
 #define __DEFAULT_FETCH_TYPE(t) u##t
 #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
@@ -536,6 +558,7 @@ struct probe_arg {
 /* Flags for trace_probe */
 #define TP_FLAG_TRACE	1
 #define TP_FLAG_PROFILE	2
+#define TP_FLAG_REGISTERED 4
 
 struct trace_probe {
 	struct list_head	list;
@@ -555,16 +578,49 @@ struct trace_probe {
 	(sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int probe_is_return(struct trace_probe *tp)
+static __kprobes int trace_probe_is_return(struct trace_probe *tp)
 {
 	return tp->rp.handler != NULL;
 }
 
-static __kprobes const char *probe_symbol(struct trace_probe *tp)
+static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
 {
 	return tp->symbol ? tp->symbol : "unknown";
 }
 
+static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
+{
+	return tp->rp.kp.offset;
+}
+
+static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
+{
+	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
+}
+
+static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
+{
+	return !!(tp->flags & TP_FLAG_REGISTERED);
+}
+
+static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
+{
+	return !!(kprobe_gone(&tp->rp.kp));
+}
+
+static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
+						struct module *mod)
+{
+	int len = strlen(mod->name);
+	const char *name = trace_probe_symbol(tp);
+	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
+}
+
+static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
+{
+	return !!strchr(trace_probe_symbol(tp), ':');
+}
+
 static int register_probe_event(struct trace_probe *tp);
 static void unregister_probe_event(struct trace_probe *tp);
 
@@ -646,6 +702,16 @@ error:
 	return ERR_PTR(ret);
 }
 
+static void update_probe_arg(struct probe_arg *arg)
+{
+	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
+		update_bitfield_fetch_param(arg->fetch.data);
+	else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
+		update_deref_fetch_param(arg->fetch.data);
+	else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
+		update_symbol_cache(arg->fetch.data);
+}
+
 static void free_probe_arg(struct probe_arg *arg)
 {
 	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
@@ -671,7 +737,7 @@ static void free_trace_probe(struct trace_probe *tp)
 	kfree(tp);
 }
 
-static struct trace_probe *find_probe_event(const char *event,
+static struct trace_probe *find_trace_probe(const char *event,
 					    const char *group)
 {
 	struct trace_probe *tp;
@@ -683,13 +749,96 @@ static struct trace_probe *find_probe_event(const char *event,
 	return NULL;
 }
 
+/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
+static int enable_trace_probe(struct trace_probe *tp, int flag)
+{
+	int ret = 0;
+
+	tp->flags |= flag;
+	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
+	    !trace_probe_has_gone(tp)) {
+		if (trace_probe_is_return(tp))
+			ret = enable_kretprobe(&tp->rp);
+		else
+			ret = enable_kprobe(&tp->rp.kp);
+	}
+
+	return ret;
+}
+
+/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
+static void disable_trace_probe(struct trace_probe *tp, int flag)
+{
+	tp->flags &= ~flag;
+	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
+		if (trace_probe_is_return(tp))
+			disable_kretprobe(&tp->rp);
+		else
+			disable_kprobe(&tp->rp.kp);
+	}
+}
+
+/* Internal register function - just handle k*probes and flags */
+static int __register_trace_probe(struct trace_probe *tp)
+{
+	int i, ret;
+
+	if (trace_probe_is_registered(tp))
+		return -EINVAL;
+
+	for (i = 0; i < tp->nr_args; i++)
+		update_probe_arg(&tp->args[i]);
+
+	/* Set/clear disabled flag according to tp->flag */
+	if (trace_probe_is_enabled(tp))
+		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
+	else
+		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
+
+	if (trace_probe_is_return(tp))
+		ret = register_kretprobe(&tp->rp);
+	else
+		ret = register_kprobe(&tp->rp.kp);
+
+	if (ret == 0)
+		tp->flags |= TP_FLAG_REGISTERED;
+	else {
+		pr_warning("Could not insert probe at %s+%lu: %d\n",
+			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
+		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
+			pr_warning("This probe might be able to register after"
+				   "target module is loaded. Continue.\n");
+			ret = 0;
+		} else if (ret == -EILSEQ) {
+			pr_warning("Probing address(0x%p) is not an "
+				   "instruction boundary.\n",
+				   tp->rp.kp.addr);
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+/* Internal unregister function - just handle k*probes and flags */
+static void __unregister_trace_probe(struct trace_probe *tp)
+{
+	if (trace_probe_is_registered(tp)) {
+		if (trace_probe_is_return(tp))
+			unregister_kretprobe(&tp->rp);
+		else
+			unregister_kprobe(&tp->rp.kp);
+		tp->flags &= ~TP_FLAG_REGISTERED;
+		/* Cleanup kprobe for reuse */
+		if (tp->rp.kp.symbol_name)
+			tp->rp.kp.addr = NULL;
+	}
+}
+
 /* Unregister a trace_probe and probe_event: call with locking probe_lock */
 static void unregister_trace_probe(struct trace_probe *tp)
 {
-	if (probe_is_return(tp))
-		unregister_kretprobe(&tp->rp);
-	else
-		unregister_kprobe(&tp->rp.kp);
+	__unregister_trace_probe(tp);
 	list_del(&tp->list);
 	unregister_probe_event(tp);
 }
@@ -702,41 +851,65 @@ static int register_trace_probe(struct trace_probe *tp)
 
 	mutex_lock(&probe_lock);
 
-	/* register as an event */
-	old_tp = find_probe_event(tp->call.name, tp->call.class->system);
+	/* Delete old (same name) event if exist */
+	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
 	if (old_tp) {
-		/* delete old event */
 		unregister_trace_probe(old_tp);
 		free_trace_probe(old_tp);
 	}
 
+	/* Register new event */
 	ret = register_probe_event(tp);
 	if (ret) {
 		pr_warning("Failed to register probe event(%d)\n", ret);
 		goto end;
 	}
 
-	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
-	if (probe_is_return(tp))
-		ret = register_kretprobe(&tp->rp);
-	else
-		ret = register_kprobe(&tp->rp.kp);
-
-	if (ret) {
-		pr_warning("Could not insert probe(%d)\n", ret);
-		if (ret == -EILSEQ) {
-			pr_warning("Probing address(0x%p) is not an "
-				   "instruction boundary.\n",
-				   tp->rp.kp.addr);
-			ret = -EINVAL;
-		}
+	/* Register k*probe */
+	ret = __register_trace_probe(tp);
+	if (ret < 0)
 		unregister_probe_event(tp);
-	} else
+	else
 		list_add_tail(&tp->list, &probe_list);
 
 end:
 	mutex_unlock(&probe_lock);
 	return ret;
 }
 
+/* Module notifier call back, checking event on the module */
+static int trace_probe_module_callback(struct notifier_block *nb,
+				       unsigned long val, void *data)
+{
+	struct module *mod = data;
+	struct trace_probe *tp;
+	int ret;
+
+	if (val != MODULE_STATE_COMING)
+		return NOTIFY_DONE;
+
+	/* Update probes on coming module */
+	mutex_lock(&probe_lock);
+	list_for_each_entry(tp, &probe_list, list) {
+		if (trace_probe_within_module(tp, mod)) {
+			__unregister_trace_probe(tp);
+			ret = __register_trace_probe(tp);
+			if (ret)
+				pr_warning("Failed to re-register probe %s on"
+					   "%s: %d\n",
+					   tp->call.name, mod->name, ret);
+		}
+	}
+	mutex_unlock(&probe_lock);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block trace_probe_module_nb = {
+	.notifier_call = trace_probe_module_callback,
+	.priority = 1	/* Invoked after kprobe module callback */
+};
+
 /* Split symbol and offset. */
 static int split_symbol_offset(char *symbol, unsigned long *offset)
 {
@@ -962,8 +1135,8 @@ static int create_trace_probe(int argc, char **argv)
 {
 	/*
 	 * Argument syntax:
-	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
-	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
+	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
+	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
 	 * Fetch args:
 	 *  $retval	: fetch return value
 	 *  $stack	: fetch stack address
|
||||
return -EINVAL;
|
||||
}
|
||||
mutex_lock(&probe_lock);
|
||||
tp = find_probe_event(event, group);
|
||||
tp = find_trace_probe(event, group);
|
||||
if (!tp) {
|
||||
mutex_unlock(&probe_lock);
|
||||
pr_info("Event %s/%s doesn't exist.\n", group, event);
|
||||
@@ -1144,7 +1317,7 @@ error:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void cleanup_all_probes(void)
|
||||
static void release_all_trace_probes(void)
|
||||
{
|
||||
struct trace_probe *tp;
|
||||
|
||||
@@ -1158,7 +1331,6 @@ static void cleanup_all_probes(void)
|
||||
mutex_unlock(&probe_lock);
|
||||
}
|
||||
|
||||
|
||||
/* Probes listing interfaces */
|
||||
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
|
||||
{
|
||||
@@ -1181,15 +1353,16 @@ static int probes_seq_show(struct seq_file *m, void *v)
|
||||
struct trace_probe *tp = v;
|
||||
int i;
|
||||
|
||||
seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
|
||||
seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
|
||||
seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
|
||||
|
||||
if (!tp->symbol)
|
||||
seq_printf(m, " 0x%p", tp->rp.kp.addr);
|
||||
else if (tp->rp.kp.offset)
|
||||
seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
|
||||
seq_printf(m, " %s+%u", trace_probe_symbol(tp),
|
||||
tp->rp.kp.offset);
|
||||
else
|
||||
seq_printf(m, " %s", probe_symbol(tp));
|
||||
seq_printf(m, " %s", trace_probe_symbol(tp));
|
||||
|
||||
for (i = 0; i < tp->nr_args; i++)
|
||||
seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
|
||||
@@ -1209,7 +1382,7 @@ static int probes_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
if ((file->f_mode & FMODE_WRITE) &&
|
||||
(file->f_flags & O_TRUNC))
|
||||
cleanup_all_probes();
|
||||
release_all_trace_probes();
|
||||
|
||||
return seq_open(file, &probes_seq_op);
|
||||
}
|
||||
@@ -1513,30 +1686,6 @@ partial:
|
||||
return TRACE_TYPE_PARTIAL_LINE;
|
||||
}
|
||||
|
||||
static int probe_event_enable(struct ftrace_event_call *call)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)call->data;
|
||||
|
||||
tp->flags |= TP_FLAG_TRACE;
|
||||
if (probe_is_return(tp))
|
||||
return enable_kretprobe(&tp->rp);
|
||||
else
|
||||
return enable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
|
||||
static void probe_event_disable(struct ftrace_event_call *call)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)call->data;
|
||||
|
||||
tp->flags &= ~TP_FLAG_TRACE;
|
||||
if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
|
||||
if (probe_is_return(tp))
|
||||
disable_kretprobe(&tp->rp);
|
||||
else
|
||||
disable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
}
|
||||
|
||||
#undef DEFINE_FIELD
|
||||
#define DEFINE_FIELD(type, item, name, is_signed) \
|
||||
do { \
|
||||
@@ -1598,7 +1747,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
|
||||
|
||||
const char *fmt, *arg;
|
||||
|
||||
if (!probe_is_return(tp)) {
|
||||
if (!trace_probe_is_return(tp)) {
|
||||
fmt = "(%lx)";
|
||||
arg = "REC->" FIELD_STRING_IP;
|
||||
} else {
|
||||
@@ -1715,49 +1864,25 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
|
||||
head = this_cpu_ptr(call->perf_events);
|
||||
perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
|
||||
}
|
||||
|
||||
static int probe_perf_enable(struct ftrace_event_call *call)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)call->data;
|
||||
|
||||
tp->flags |= TP_FLAG_PROFILE;
|
||||
|
||||
if (probe_is_return(tp))
|
||||
return enable_kretprobe(&tp->rp);
|
||||
else
|
||||
return enable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
|
||||
static void probe_perf_disable(struct ftrace_event_call *call)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)call->data;
|
||||
|
||||
tp->flags &= ~TP_FLAG_PROFILE;
|
||||
|
||||
if (!(tp->flags & TP_FLAG_TRACE)) {
|
||||
if (probe_is_return(tp))
|
||||
disable_kretprobe(&tp->rp);
|
||||
else
|
||||
disable_kprobe(&tp->rp.kp);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_PERF_EVENTS */
|
||||
|
||||
static __kprobes
|
||||
int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
|
||||
{
|
||||
struct trace_probe *tp = (struct trace_probe *)event->data;
|
||||
|
||||
switch (type) {
|
||||
case TRACE_REG_REGISTER:
|
||||
return probe_event_enable(event);
|
||||
return enable_trace_probe(tp, TP_FLAG_TRACE);
|
||||
case TRACE_REG_UNREGISTER:
|
||||
probe_event_disable(event);
|
||||
disable_trace_probe(tp, TP_FLAG_TRACE);
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_PERF_EVENTS
|
||||
case TRACE_REG_PERF_REGISTER:
|
||||
return probe_perf_enable(event);
|
||||
return enable_trace_probe(tp, TP_FLAG_PROFILE);
|
||||
case TRACE_REG_PERF_UNREGISTER:
|
||||
probe_perf_disable(event);
|
||||
disable_trace_probe(tp, TP_FLAG_PROFILE);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
@@ -1807,7 +1932,7 @@ static int register_probe_event(struct trace_probe *tp)
|
||||
|
||||
/* Initialize ftrace_event_call */
|
||||
INIT_LIST_HEAD(&call->class->fields);
|
||||
if (probe_is_return(tp)) {
|
||||
if (trace_probe_is_return(tp)) {
|
||||
call->event.funcs = &kretprobe_funcs;
|
||||
call->class->define_fields = kretprobe_event_define_fields;
|
||||
} else {
|
||||
@@ -1846,6 +1971,9 @@ static __init int init_kprobe_trace(void)
|
||||
struct dentry *d_tracer;
|
||||
struct dentry *entry;
|
||||
|
||||
if (register_module_notifier(&trace_probe_module_nb))
|
||||
return -EINVAL;
|
||||
|
||||
d_tracer = tracing_init_dentry();
|
||||
if (!d_tracer)
|
||||
return 0;
|
||||
@@ -1899,12 +2027,12 @@ static __init int kprobe_trace_self_tests_init(void)
|
||||
warn++;
|
||||
} else {
|
||||
/* Enable trace point */
|
||||
tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
|
||||
tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
|
||||
if (WARN_ON_ONCE(tp == NULL)) {
|
||||
pr_warning("error on getting new probe.\n");
|
||||
warn++;
|
||||
} else
|
||||
probe_event_enable(&tp->call);
|
||||
enable_trace_probe(tp, TP_FLAG_TRACE);
|
||||
}
|
||||
|
||||
ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
|
||||
@@ -1914,12 +2042,12 @@ static __init int kprobe_trace_self_tests_init(void)
|
||||
warn++;
|
||||
} else {
|
||||
/* Enable trace point */
|
||||
tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
|
||||
tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
|
||||
if (WARN_ON_ONCE(tp == NULL)) {
|
||||
pr_warning("error on getting new probe.\n");
|
||||
warn++;
|
||||
} else
|
||||
probe_event_enable(&tp->call);
|
||||
enable_trace_probe(tp, TP_FLAG_TRACE);
|
||||
}
|
||||
|
||||
if (warn)
|
||||
@@ -1940,7 +2068,7 @@ static __init int kprobe_trace_self_tests_init(void)
|
||||
}
|
||||
|
||||
end:
|
||||
cleanup_all_probes();
|
||||
release_all_trace_probes();
|
||||
if (warn)
|
||||
pr_cont("NG: Some tests are failed. Please check them.\n");
|
||||
else
|
||||
|
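
enable_trace_probe()/disable_trace_probe() above let the ftrace and perf paths share one probe through independent flags. A compact standalone sketch of just that reference-flag logic, with an 'armed' variable standing in for the registered kprobe:

#include <stdio.h>

#define TP_FLAG_TRACE   1
#define TP_FLAG_PROFILE 2

static unsigned int flags;
static int armed;

static void enable_probe(int flag)
{
	flags |= flag;
	if (!armed && (flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)))
		armed = 1;	/* first user arms the probe */
}

static void disable_probe(int flag)
{
	flags &= ~flag;
	if (armed && !(flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)))
		armed = 0;	/* last user disarms it */
}

int main(void)
{
	enable_probe(TP_FLAG_TRACE);
	enable_probe(TP_FLAG_PROFILE);
	disable_probe(TP_FLAG_TRACE);
	printf("armed: %d\n", armed);	/* still armed: perf flag is set */
	disable_probe(TP_FLAG_PROFILE);
	printf("armed: %d\n", armed);	/* now disarmed */
	return 0;
}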
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1107,19 +1107,20 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
 {
 	struct stack_entry *field;
 	struct trace_seq *s = &iter->seq;
-	int i;
+	unsigned long *p;
+	unsigned long *end;
 
 	trace_assign_type(field, iter->ent);
+	end = (unsigned long *)((long)iter->ent + iter->ent_size);
 
 	if (!trace_seq_puts(s, "<stack trace>\n"))
 		goto partial;
-	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
-			break;
+
+	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
 		if (!trace_seq_puts(s, " => "))
 			goto partial;
-		if (!seq_print_ip_sym(s, field->caller[i], flags))
+
+		if (!seq_print_ip_sym(s, *p, flags))
 			goto partial;
 		if (!trace_seq_puts(s, "\n"))
 			goto partial;
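
trace_stack_print() now walks a variable-length record, stopping at either a ULONG_MAX terminator or the end of the entry as computed from iter->ent_size. The same bounded-walk idiom in a self-contained form (the sample addresses are arbitrary):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long caller[] = { 0x1111, 0x2222, 0x3333, ULONG_MAX };
	unsigned long *end = caller + sizeof(caller) / sizeof(caller[0]);
	unsigned long *p;

	/* stop at the terminator or at the end of the buffer, whichever is first */
	for (p = caller; p && *p != ULONG_MAX && p < end; p++)
		printf(" => %#lx\n", *p);
	return 0;
}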
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -200,7 +200,6 @@ static int is_softlockup(unsigned long touch_ts)
 }
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
-void __weak hw_nmi_watchdog_set_attr(struct perf_event_attr *wd_attr) { }
 
 static struct perf_event_attr wd_hw_attr = {
 	.type		= PERF_TYPE_HARDWARE,
@@ -372,7 +371,6 @@ static int watchdog_nmi_enable(int cpu)
 
 	wd_attr = &wd_hw_attr;
 	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
-	hw_nmi_watchdog_set_attr(wd_attr);
 
 	/* Try to register using hardware perf events */
 	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);