Merge tag 'trace-v5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:

 - Have recordmcount work with > 64K sections (to support LTO)

 - kprobe RCU fixes

 - Correct a kprobe critical section with missing mutex

 - Remove redundant arch_disarm_kprobe() call

 - Fix lockup when kretprobe triggers within kprobe_flush_task()

 - Fix memory leak in fetch_op_data operations

 - Fix sleep in atomic in ftrace trace array sample code

 - Free up memory on failure in sample trace array code

 - Fix incorrect reporting of function_graph fields in format file

 - Fix quote within quote parsing in bootconfig

 - Fix return value of bootconfig tool

 - Add testcases for bootconfig tool

 - Fix maybe uninitialized warning in ftrace pid file code

 - Remove unused variable in tracing_iter_reset()

 - Fix some typos

* tag 'trace-v5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace: Fix maybe-uninitialized compiler warning
  tools/bootconfig: Add testcase for show-command and quotes test
  tools/bootconfig: Fix to return 0 if succeeded to show the bootconfig
  tools/bootconfig: Fix to use correct quotes for value
  proc/bootconfig: Fix to use correct quotes for value
  tracing: Remove unused event variable in tracing_iter_reset
  tracing/probe: Fix memleak in fetch_op_data operations
  trace: Fix typo in allocate_ftrace_ops()'s comment
  tracing: Make ftrace packed events have align of 1
  sample-trace-array: Remove trace_array 'sample-instance'
  sample-trace-array: Fix sleeping function called from invalid context
  kretprobe: Prevent triggering kretprobe from within kprobe_flush_task
  kprobes: Remove redundant arch_disarm_kprobe() call
  kprobes: Fix to protect kick_kprobe_optimizer() by kprobe_mutex
  kprobes: Use non RCU traversal APIs on kprobe_tables if possible
  kprobes: Suppress the suspicious RCU warning on kprobes
  recordmcount: support >64k sections
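The recordmcount item deals with an ELF format corner: e_shnum is only a 16-bit field, so once an LTO build pushes the section count past the reserved range, ELF stores 0 there and puts the real count in sh_size of section header 0 (e_shstrndx and symbol st_shndx have similar SHN_XINDEX escape hatches). A minimal userspace sketch of the count rule, with illustrative names and not code from the patch:

    #include <elf.h>
    #include <stdio.h>

    /* Real section count: for >= SHN_LORESERVE sections, e_shnum is 0
     * and the count lives in shdr[0].sh_size instead. */
    static size_t real_shnum(const Elf64_Ehdr *eh, const Elf64_Shdr *sh0)
    {
    	return eh->e_shnum ? eh->e_shnum : sh0->sh_size;
    }

    int main(void)
    {
    	Elf64_Ehdr eh = { .e_shnum = 0 };
    	Elf64_Shdr sh0 = { .sh_size = 70000 };	/* pretend: 70000 sections */

    	printf("%zu sections\n", real_shnum(&eh, &sh0));
    	return 0;
    }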
@@ -2260,7 +2260,7 @@ ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
 		if (hash_contains_ip(ip, op->func_hash))
 			return op;
 		}
 	}

 	return NULL;
 }
@@ -3599,7 +3599,7 @@ static int t_show(struct seq_file *m, void *v)
 			if (direct)
 				seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
 			}
 		}
 	}

 	seq_putc(m, '\n');
@@ -7151,6 +7151,10 @@ static int pid_open(struct inode *inode, struct file *file, int type)
 	case TRACE_NO_PIDS:
 		seq_ops = &ftrace_no_pid_sops;
 		break;
+	default:
+		trace_array_put(tr);
+		WARN_ON_ONCE(1);
+		return -EINVAL;
 	}

 	ret = seq_open(file, seq_ops);
@@ -7229,6 +7233,10 @@ pid_write(struct file *filp, const char __user *ubuf,
 		other_pids = rcu_dereference_protected(tr->function_pids,
 			     lockdep_is_held(&ftrace_lock));
 		break;
+	default:
+		ret = -EINVAL;
+		WARN_ON_ONCE(1);
+		goto out;
 	}

 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
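Both hunks above close the same hole: the switch over the pid file type had no default arm, so the compiler could not prove that seq_ops (or filtered_pids/other_pids) was assigned on every path, and an out-of-range type would have used them uninitialized. A standalone sketch of the pattern and the default-arm fix, with made-up names rather than the kernel's:

    #include <stdio.h>

    enum open_type { TYPE_PIDS, TYPE_NO_PIDS };

    static const char *pick_ops(int type)
    {
    	const char *ops;	/* no initializer, like seq_ops above */

    	switch (type) {
    	case TYPE_PIDS:
    		ops = "pid_sops";
    		break;
    	case TYPE_NO_PIDS:
    		ops = "no_pid_sops";
    		break;
    	default:	/* without this arm, a bad "type" would read
    			 * "ops" uninitialized, and GCC may warn with
    			 * -Wmaybe-uninitialized at the return below */
    		return NULL;
    	}
    	return ops;
    }

    int main(void)
    {
    	printf("%s\n", pick_ops(0));
    	return 0;
    }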
@@ -3570,7 +3570,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
-	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter;
 	unsigned long entries = 0;
 	u64 ts;
@@ -3588,7 +3587,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 	 * that a reset never took place on a cpu. This is evident
 	 * by the timestamp being before the start of the buffer.
 	 */
-	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
+	while (ring_buffer_iter_peek(buf_iter, &ts)) {
 		if (ts >= iter->array_buffer->time_start)
 			break;
 		entries++;
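The dropped event variable was assigned by ring_buffer_iter_peek() but never read; its only job was to serve as the loop condition, which is the pattern GCC flags as "variable set but not used". A compilable toy showing both the warning form and the fixed form (illustrative names; gcc may flag the first loop with -Wall):

    #include <stdio.h>

    static int next_val(int *cursor)
    {
    	return *cursor < 3 ? ++*cursor : 0;
    }

    int main(void)
    {
    	int cursor = 0;
    	int v;

    	while ((v = next_val(&cursor)))	/* "v" set but never used */
    		;

    	cursor = 0;
    	while (next_val(&cursor))	/* the fixed form: no dead binding */
    		;
    	printf("done\n");
    	return 0;
    }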
@@ -61,6 +61,9 @@ enum trace_type {
 #undef __field_desc
 #define __field_desc(type, container, item)
 
+#undef __field_packed
+#define __field_packed(type, container, item)
+
 #undef __array
 #define __array(type, item, size) type item[size];
@@ -78,8 +78,8 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry,
 
 	F_STRUCT(
 		__field_struct(	struct ftrace_graph_ent,	graph_ent	)
-		__field_desc(	unsigned long,	graph_ent,	func		)
-		__field_desc(	int,		graph_ent,	depth		)
+		__field_packed(	unsigned long,	graph_ent,	func		)
+		__field_packed(	int,		graph_ent,	depth		)
 	),
 
 	F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
@@ -92,11 +92,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
 
 	F_STRUCT(
 		__field_struct(	struct ftrace_graph_ret,	ret	)
-		__field_desc(	unsigned long,	ret,		func	)
-		__field_desc(	unsigned long,	ret,		overrun	)
-		__field_desc(	unsigned long long, ret,	calltime)
-		__field_desc(	unsigned long long, ret,	rettime	)
-		__field_desc(	int,		ret,		depth	)
+		__field_packed(	unsigned long,	ret,		func	)
+		__field_packed(	unsigned long,	ret,		overrun	)
+		__field_packed(	unsigned long long, ret,	calltime)
+		__field_packed(	unsigned long long, ret,	rettime	)
+		__field_packed(	int,		ret,		depth	)
 	),
 
 	F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
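ftrace_graph_ent_entry and ftrace_graph_ret_entry are packed structs, so their members do not sit at naturally aligned offsets; describing them with __field_desc, which records __alignof__(type), made the layout advertised in the format file disagree with what is actually written to the ring buffer. A userspace sketch of the mismatch, using stand-in struct names rather than the kernel's:

    #include <stdio.h>
    #include <stddef.h>

    struct graph_ent {
    	unsigned long func;
    	int depth;
    } __attribute__((packed));

    struct entry {
    	unsigned short type;	/* 2-byte header field before the payload */
    	struct graph_ent ent;
    } __attribute__((packed));

    int main(void)
    {
    	/* With packing, "func" lands at offset 2, not a naturally
    	 * aligned 8; a parser assuming __alignof__(long) would read
    	 * the wrong bytes. */
    	printf("func at %zu, depth at %zu\n",
    	       offsetof(struct entry, ent.func),
    	       offsetof(struct entry, ent.depth));
    	return 0;
    }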
@@ -45,6 +45,9 @@ static int ftrace_event_register(struct trace_event_call *call,
 #undef __field_desc
 #define __field_desc(type, container, item) type item;
 
+#undef __field_packed
+#define __field_packed(type, container, item) type item;
+
 #undef __array
 #define __array(type, item, size) type item[size];
@@ -85,6 +88,13 @@ static void __always_unused ____ftrace_check_##name(void) \
 	.size = sizeof(_type), .align = __alignof__(_type),	\
 	is_signed_type(_type), .filter_type = _filter_type },
 
+
+#undef __field_ext_packed
+#define __field_ext_packed(_type, _item, _filter_type) {	\
+	.type = #_type, .name = #_item,				\
+	.size = sizeof(_type), .align = 1,			\
+	is_signed_type(_type), .filter_type = _filter_type },
+
 #undef __field
 #define __field(_type, _item) __field_ext(_type, _item, FILTER_OTHER)
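__field_ext_packed differs from __field_ext only in hard-coding .align = 1, so any consumer that recomputes field offsets from the recorded alignment lands on the packed positions. A runnable sketch of how a field's alignment drives offset placement (illustrative helper, not the kernel's layout code):

    #include <stdio.h>
    #include <stddef.h>

    /* Round "*offset" up to "align", place a field of "size" bytes,
     * and return the field's starting offset. */
    static size_t place(size_t *offset, size_t size, size_t align)
    {
    	size_t start = (*offset + align - 1) & ~(align - 1);

    	*offset = start + size;
    	return start;
    }

    int main(void)
    {
    	size_t off_natural = 2, off_packed = 2;	/* 2-byte header used up */

    	/* natural alignment: an 8-byte field skips ahead to offset 8 */
    	printf("natural: %zu\n", place(&off_natural, 8, 8));
    	/* .align = 1 (packed): the same field starts right at offset 2 */
    	printf("packed:  %zu\n", place(&off_packed, 8, 1));
    	return 0;
    }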
@@ -94,6 +104,9 @@ static void __always_unused ____ftrace_check_##name(void) \
 #undef __field_desc
 #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
 
+#undef __field_packed
+#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
+
 #undef __array
 #define __array(_type, _item, _len) {					\
 	.type = #_type"["__stringify(_len)"]", .name = #_item,		\
@@ -129,6 +142,9 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \
 #undef __field_desc
 #define __field_desc(type, container, item)
 
+#undef __field_packed
+#define __field_packed(type, container, item)
+
 #undef __array
 #define __array(type, item, len)
@@ -42,7 +42,7 @@ static int allocate_ftrace_ops(struct trace_array *tr)
 	if (!ops)
 		return -ENOMEM;
 
-	/* Currently only the non stack verision is supported */
+	/* Currently only the non stack version is supported */
 	ops->func = function_trace_call;
 	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
@@ -639,8 +639,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
 			ret = -EINVAL;
 			goto fail;
 		}
-	if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) ||
-	    parg->count) {
+	if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM ||
+	     code->op == FETCH_OP_DATA) || parg->count) {
 		/*
 		 * IMM, DATA and COMM is pointing actual address, those
 		 * must be kept, and if parg->count != 0, this is an
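The widened condition matters because FETCH_OP_IMM, FETCH_OP_COMM, and FETCH_OP_DATA ops all carry a pointer to separately allocated data; an op rewritten without being on the keep list loses that pointer, which is the memleak named in the pull summary. A minimal sketch of the ownership rule, with hypothetical names rather than the kernel's:

    #include <stdlib.h>

    enum fetch_op { OP_REG, OP_IMM, OP_COMM, OP_DATA };

    struct fetch_insn {
    	enum fetch_op op;
    	void *data;	/* heap-owned when op is IMM/COMM/DATA */
    };

    /* Rewriting an instruction must account for owned data; leaving
     * OP_DATA out of this test is the shape of the original leak. */
    static void replace_op(struct fetch_insn *code, enum fetch_op new_op)
    {
    	if (code->op == OP_IMM || code->op == OP_COMM ||
    	    code->op == OP_DATA)
    		free(code->data);
    	code->op = new_op;
    	code->data = NULL;
    }

    int main(void)
    {
    	struct fetch_insn insn = { OP_DATA, malloc(16) };

    	replace_op(&insn, OP_REG);	/* no leak: data was freed */
    	return 0;
    }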