Merge branch 'perf/vlbr'
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -3,6 +3,9 @@
  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
 #include <linux/blktrace_api.h>
@@ -344,7 +347,8 @@ static int __blk_trace_remove(struct request_queue *q)
 {
         struct blk_trace *bt;
 
-        bt = xchg(&q->blk_trace, NULL);
+        bt = rcu_replace_pointer(q->blk_trace, NULL,
+                                 lockdep_is_held(&q->blk_trace_mutex));
         if (!bt)
                 return -EINVAL;
 
@@ -494,6 +498,17 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
          */
        strreplace(buts->name, '/', '_');
 
+        /*
+         * bdev can be NULL, as with scsi-generic, this is a helpful as
+         * we can be.
+         */
+        if (rcu_dereference_protected(q->blk_trace,
+                                      lockdep_is_held(&q->blk_trace_mutex))) {
+                pr_warn("Concurrent blktraces are not allowed on %s\n",
+                        buts->name);
+                return -EBUSY;
+        }
+
         bt = kzalloc(sizeof(*bt), GFP_KERNEL);
         if (!bt)
                 return -ENOMEM;
@@ -543,10 +558,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
         bt->pid = buts->pid;
         bt->trace_state = Blktrace_setup;
 
-        ret = -EBUSY;
-        if (cmpxchg(&q->blk_trace, NULL, bt))
-                goto err;
-
+        rcu_assign_pointer(q->blk_trace, bt);
         get_probe_ref();
 
         ret = 0;
@@ -1629,7 +1641,8 @@ static int blk_trace_remove_queue(struct request_queue *q)
 {
         struct blk_trace *bt;
 
-        bt = xchg(&q->blk_trace, NULL);
+        bt = rcu_replace_pointer(q->blk_trace, NULL,
+                                 lockdep_is_held(&q->blk_trace_mutex));
         if (bt == NULL)
                 return -EINVAL;
 
@@ -1661,10 +1674,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
 
         blk_trace_setup_lba(bt, bdev);
 
-        ret = -EBUSY;
-        if (cmpxchg(&q->blk_trace, NULL, bt))
-                goto free_bt;
-
+        rcu_assign_pointer(q->blk_trace, bt);
         get_probe_ref();
         return 0;
 
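The blktrace hunks above replace lock-free xchg()/cmpxchg() updates of q->blk_trace with the RCU pointer primitives: updaters now assert via lockdep_is_held() that they hold q->blk_trace_mutex, and readers that dereference the pointer under rcu_read_lock() can no longer race with teardown. A minimal kernel-style sketch of the same publish/replace pattern, with hypothetical names (my_obj, my_mutex, global_ptr) standing in for the blktrace specifics:

    static DEFINE_MUTEX(my_mutex);
    static struct my_obj __rcu *global_ptr;

    static int my_setup(struct my_obj *obj)
    {
            mutex_lock(&my_mutex);
            /* Update side: look for an existing object under the lock. */
            if (rcu_dereference_protected(global_ptr,
                                          lockdep_is_held(&my_mutex))) {
                    mutex_unlock(&my_mutex);
                    return -EBUSY;
            }
            rcu_assign_pointer(global_ptr, obj);    /* publish */
            mutex_unlock(&my_mutex);
            return 0;
    }

    static int my_remove(void)
    {
            struct my_obj *obj;

            mutex_lock(&my_mutex);
            /* Swap in NULL while proving we hold the update-side lock. */
            obj = rcu_replace_pointer(global_ptr, NULL,
                                      lockdep_is_held(&my_mutex));
            mutex_unlock(&my_mutex);
            if (!obj)
                    return -EINVAL;
            synchronize_rcu();      /* let in-flight readers drain */
            kfree(obj);
            return 0;
    }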
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -141,7 +141,7 @@ bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 {
         int ret;
 
-        ret = probe_user_read(dst, unsafe_ptr, size);
+        ret = copy_from_user_nofault(dst, unsafe_ptr, size);
         if (unlikely(ret < 0))
                 memset(dst, 0, size);
         return ret;
@@ -196,7 +196,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 
         if (unlikely(ret < 0))
                 goto fail;
-        ret = probe_kernel_read(dst, unsafe_ptr, size);
+        ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
         if (unlikely(ret < 0))
                 goto fail;
         return ret;
@@ -241,7 +241,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
         if (unlikely(ret < 0))
                 goto fail;
 
-        return 0;
+        return ret;
 fail:
         memset(dst, 0, size);
         return ret;
@@ -326,7 +326,7 @@ BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
         if (unlikely(!nmi_uaccess_okay()))
                 return -EPERM;
 
-        return probe_user_write(unsafe_ptr, src, size);
+        return copy_to_user_nofault(unsafe_ptr, src, size);
 }
 
 static const struct bpf_func_proto bpf_probe_write_user_proto = {
@@ -661,7 +661,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 
                         copy_size = (fmt[i + 2] == '4') ? 4 : 16;
 
-                        err = probe_kernel_read(bufs->buf[memcpy_cnt],
+                        err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
                                         (void *) (long) args[fmt_cnt],
                                         copy_size);
                         if (err < 0)
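These bpf_trace.c hunks are part of the tree-wide rename of the maccess helpers: probe_user_read() becomes copy_from_user_nofault(), probe_kernel_read() becomes copy_from_kernel_nofault(), and probe_user_write() becomes copy_to_user_nofault(). Semantics are unchanged: each returns 0 on success and -EFAULT when the access would fault, never a partial count. An illustrative kernel-style sketch of a caller using the new name (the helper below is invented for the example):

    #include <linux/uaccess.h>

    /* Read one word from a possibly-bad kernel address without faulting. */
    static long peek_kernel_word(const void *addr, unsigned long *out)
    {
            unsigned long val;

            if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
                    return -EFAULT; /* bad address: no oops, just an error */

            *out = val;
            return 0;
    }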
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2260,7 +2260,7 @@ ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
 
                 if (hash_contains_ip(ip, op->func_hash))
                         return op;
-        } 
+        }
 
         return NULL;
 }
@@ -3643,7 +3643,7 @@ static int t_show(struct seq_file *m, void *v)
                 if (direct)
                         seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
                 }
-        } 
+        }
 
         seq_putc(m, '\n');
 
@@ -7248,6 +7248,10 @@ static int pid_open(struct inode *inode, struct file *file, int type)
         case TRACE_NO_PIDS:
                 seq_ops = &ftrace_no_pid_sops;
                 break;
+        default:
+                trace_array_put(tr);
+                WARN_ON_ONCE(1);
+                return -EINVAL;
         }
 
         ret = seq_open(file, seq_ops);
@@ -7326,6 +7330,10 @@ pid_write(struct file *filp, const char __user *ubuf,
                 other_pids = rcu_dereference_protected(tr->function_pids,
                                              lockdep_is_held(&ftrace_lock));
                 break;
+        default:
+                ret = -EINVAL;
+                WARN_ON_ONCE(1);
+                goto out;
         }
 
         ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
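The pid_open()/pid_write() hunks give the switch over the caller-supplied type a default: arm that warns once and fails with -EINVAL rather than continuing with an uninitialized seq_ops or pid list (pid_open() also drops the trace_array reference it has already taken). The same defensive shape as a freestanding sketch (enum and names invented for illustration):

    #include <errno.h>
    #include <stdio.h>

    enum pid_type { TRACE_PIDS, TRACE_NO_PIDS };

    static int select_ops(enum pid_type type, const char **ops)
    {
            switch (type) {
            case TRACE_PIDS:
                    *ops = "pid_sops";
                    break;
            case TRACE_NO_PIDS:
                    *ops = "no_pid_sops";
                    break;
            default:
                    /* The kernel uses WARN_ON_ONCE(1) here. */
                    fprintf(stderr, "unexpected type %d\n", type);
                    return -EINVAL;
            }
            return 0;
    }

    int main(void)
    {
            const char *ops;

            /* An out-of-range value is rejected instead of leaving
             * 'ops' uninitialized. */
            return select_ops((enum pid_type)42, &ops) ? 0 : 1;
    }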
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2427,7 +2427,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
         if (unlikely(info->add_timestamp)) {
                 bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
 
-                event = rb_add_time_stamp(event, info->delta, abs);
+                event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
                 length -= RB_LEN_TIME_EXTEND;
                 delta = 0;
         }
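rb_update_event() injects an extra time-stamp event in two situations: normally it carries the delta that no longer fits in the event header, while in absolute-timestamp mode (ring_buffer_time_stamp_abs()) it carries the full stamp held in info->delta; the fix passes info->delta only in the absolute case and the local delta otherwise. A simplified, illustrative model of delta encoding with an overflow fallback (the field widths here are made up, not the ring buffer's real layout):

    #include <stdint.h>
    #include <stdio.h>

    #define DELTA_BITS 27   /* illustrative width only */
    #define DELTA_MAX ((1u << DELTA_BITS) - 1)

    struct rec {
            uint32_t delta;     /* small delta stored in the event itself */
            uint64_t extend;    /* nonzero: extra record carrying a big delta */
    };

    /* Encode the gap from prev_ts to ts, spilling into an "extend"
     * record when the delta no longer fits (kernel: rb_add_time_stamp). */
    static struct rec encode(uint64_t prev_ts, uint64_t ts)
    {
            uint64_t delta = ts - prev_ts;
            struct rec r = { 0, 0 };

            if (delta <= DELTA_MAX)
                    r.delta = (uint32_t)delta;
            else
                    r.extend = delta;
            return r;
    }

    int main(void)
    {
            struct rec a = encode(1000, 1500);      /* small gap: inline */
            struct rec b = encode(0, 1ull << 40);   /* big gap: extend record */

            printf("a: delta=%u extend=%llu\n", a.delta,
                   (unsigned long long)a.extend);
            printf("b: delta=%u extend=%llu\n", b.delta,
                   (unsigned long long)b.extend);
            return 0;
    }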
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3570,7 +3570,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
-        struct ring_buffer_event *event;
         struct ring_buffer_iter *buf_iter;
         unsigned long entries = 0;
         u64 ts;
@@ -3588,7 +3587,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
          * that a reset never took place on a cpu. This is evident
          * by the timestamp being before the start of the buffer.
          */
-        while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
+        while (ring_buffer_iter_peek(buf_iter, &ts)) {
                 if (ts >= iter->array_buffer->time_start)
                         break;
                 entries++;
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -61,6 +61,9 @@ enum trace_type {
 #undef __field_desc
 #define __field_desc(type, container, item)
 
+#undef __field_packed
+#define __field_packed(type, container, item)
+
 #undef __array
 #define __array(type, item, size)       type item[size];
 
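The trace.h and trace_export.c hunks extend the classic multi-include trick used by the ftrace entry headers: trace_entries.h is included several times, and before each inclusion the __field*/__array macros are redefined so the same F_STRUCT() list expands once into a C struct, once into a field-description table, and so on. Each new macro (here __field_packed) therefore needs a stub or real definition at every expansion site. A freestanding sketch of the technique, with invented macro and struct names:

    #include <stdio.h>
    #include <stddef.h>

    /* Single source of truth for the layout (the trace_entries.h role). */
    #define MY_FIELDS \
            FIELD(int, pid) \
            FIELD(long, addr)

    /* First expansion: generate the struct. */
    #define FIELD(type, name) type name;
    struct my_entry { MY_FIELDS };
    #undef FIELD

    /* Second expansion: generate a description table from the same list. */
    struct field_desc { const char *type, *name; size_t size, offset; };
    #define FIELD(type, name) \
            { #type, #name, sizeof(type), offsetof(struct my_entry, name) },
    static const struct field_desc my_fields[] = { MY_FIELDS };
    #undef FIELD

    int main(void)
    {
            for (size_t i = 0; i < sizeof(my_fields) / sizeof(my_fields[0]); i++)
                    printf("%s %s: size=%zu off=%zu\n", my_fields[i].type,
                           my_fields[i].name, my_fields[i].size,
                           my_fields[i].offset);
            return 0;
    }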
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -101,12 +101,16 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
                 kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
 
                 ret = kprobe_event_gen_cmd_start(&cmd, event, val);
-                if (ret)
+                if (ret) {
+                        pr_err("Failed to generate probe: %s\n", buf);
                         break;
+                }
 
                 ret = kprobe_event_gen_cmd_end(&cmd);
-                if (ret)
+                if (ret) {
                         pr_err("Failed to add probe: %s\n", buf);
+                        break;
+                }
         }
 
         return ret;
@@ -120,7 +124,7 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
 }
 #endif
 
-#ifdef CONFIG_HIST_TRIGGERS
+#ifdef CONFIG_SYNTH_EVENTS
 static int __init
 trace_boot_add_synth_event(struct xbc_node *node, const char *event)
 {
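trace_boot_add_kprobe_event() drives the in-kernel dynamic-event command API: kprobe_event_cmd_init() binds a dynevent_cmd to a buffer, kprobe_event_gen_cmd_start() composes the probe command, and kprobe_event_gen_cmd_end() registers it; the hunk makes both failure paths report the offending command string and stop. A hedged sketch of a caller under the same assumptions (the probe name, location, fetch argument, and use of MAX_DYNEVENT_CMD_LEN are invented for illustration, and this shows the error-handling shape rather than a verified build):

    /* Sketch: register "myprobe" at do_sys_open, fetching its second
     * argument; all specifics here are hypothetical. */
    static int __init add_boot_kprobe(void)
    {
            struct dynevent_cmd cmd;
            char *buf;
            int ret;

            buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

            ret = kprobe_event_gen_cmd_start(&cmd, "myprobe",
                                             "do_sys_open", "filename=$arg2");
            if (ret) {
                    pr_err("Failed to generate probe: %s\n", buf);
                    goto out;
            }

            ret = kprobe_event_gen_cmd_end(&cmd);
            if (ret)
                    pr_err("Failed to add probe: %s\n", buf);
    out:
            kfree(buf);
            return ret;
    }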
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -78,8 +78,8 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry,
 
         F_STRUCT(
                 __field_struct( struct ftrace_graph_ent,       graph_ent       )
-                __field_desc(   unsigned long,  graph_ent,      func    )
-                __field_desc(   int,            graph_ent,      depth   )
+                __field_packed( unsigned long,  graph_ent,      func    )
+                __field_packed( int,            graph_ent,      depth   )
         ),
 
         F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
@@ -92,11 +92,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
 
         F_STRUCT(
                 __field_struct( struct ftrace_graph_ret,        ret     )
-                __field_desc(   unsigned long,  ret,            func    )
-                __field_desc(   unsigned long,  ret,            overrun )
-                __field_desc(   unsigned long long,     ret,    calltime)
-                __field_desc(   unsigned long long,     ret,    rettime )
-                __field_desc(   int,            ret,            depth   )
+                __field_packed( unsigned long,  ret,            func    )
+                __field_packed( unsigned long,  ret,            overrun )
+                __field_packed( unsigned long long,     ret,    calltime)
+                __field_packed( unsigned long long,     ret,    rettime )
+                __field_packed( int,            ret,            depth   )
         ),
 
         F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
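Because the funcgraph entries are declared with FTRACE_ENTRY_PACKED, members of the embedded graph_ent/ret structs can sit at unaligned offsets, so they are recorded with __field_packed, which the trace_export.c hunks below describe with .align = 1 instead of the type's natural alignment. A standalone illustration of why the declared alignment has to follow the real layout (struct names invented):

    #include <stdio.h>
    #include <stddef.h>

    struct inner {
            long func;
            int depth;
    };

    /* Packed wrapper: 'inner' can land at an odd offset. */
    struct __attribute__((packed)) entry {
            char type;
            struct inner graph_ent;
    };

    int main(void)
    {
            /* With the packed attribute, graph_ent.func sits at offset 1;
             * a parser assuming __alignof__(long) here would misread it. */
            printf("offsetof(func)  = %zu\n",
                   offsetof(struct entry, graph_ent) +
                   offsetof(struct inner, func));
            printf("offsetof(depth) = %zu\n",
                   offsetof(struct entry, graph_ent) +
                   offsetof(struct inner, depth));
            printf("alignof(long)   = %zu\n", _Alignof(long));
            return 0;
    }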
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
 
 int trigger_process_regex(struct trace_event_file *file, char *buff)
 {
-        char *command, *next = buff;
+        char *command, *next;
         struct event_command *p;
         int ret = -EINVAL;
 
+        next = buff = skip_spaces(buff);
         command = strsep(&next, ": \t");
+        if (next) {
+                next = skip_spaces(next);
+                if (!*next)
+                        next = NULL;
+        }
         command = (command[0] != '!') ? command : command + 1;
 
         mutex_lock(&trigger_cmd_mutex);
@@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops,
         int ret;
 
         /* separate the trigger from the filter (t:n [if filter]) */
-        if (param && isdigit(param[0]))
+        if (param && isdigit(param[0])) {
                 trigger = strsep(&param, " \t");
+                if (param) {
+                        param = skip_spaces(param);
+                        if (!*param)
+                                param = NULL;
+                }
+        }
 
         trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 
@@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
         trigger = strsep(&param, " \t");
         if (!trigger)
                 return -EINVAL;
+        if (param) {
+                param = skip_spaces(param);
+                if (!*param)
+                        param = NULL;
+        }
 
         system = strsep(&trigger, ":");
         if (!trigger)
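All three trace_events_trigger.c hunks fix the same parsing bug: after strsep() splits on " \t", the remainder can be a non-NULL string of pure whitespace, so each site now runs it through skip_spaces() and normalizes an empty result to NULL before treating it as a filter or parameter. A userspace re-creation of that behaviour, with a local skip_spaces() standing in for the kernel helper:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static char *skip_spaces(char *s)   /* stand-in for the kernel helper */
    {
            while (isspace((unsigned char)*s))
                    s++;
            return s;
    }

    int main(void)
    {
            char buf[] = "traceon   ";  /* trailing whitespace after command */
            char *next = buf;
            char *command = strsep(&next, " \t");

            /* Without the fix, next == "  " and would be parsed as a filter. */
            if (next) {
                    next = skip_spaces(next);
                    if (!*next)
                            next = NULL;        /* nothing real follows */
            }

            printf("command=%s next=%s\n", command, next ? next : "(null)");
            return 0;
    }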
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -45,6 +45,9 @@ static int ftrace_event_register(struct trace_event_call *call,
 #undef __field_desc
 #define __field_desc(type, container, item)     type item;
 
+#undef __field_packed
+#define __field_packed(type, container, item)   type item;
+
 #undef __array
 #define __array(type, item, size)       type item[size];
 
@@ -85,6 +88,13 @@ static void __always_unused ____ftrace_check_##name(void) \
         .size = sizeof(_type), .align = __alignof__(_type),            \
         is_signed_type(_type), .filter_type = _filter_type },
 
+
+#undef __field_ext_packed
+#define __field_ext_packed(_type, _item, _filter_type) {        \
+        .type = #_type, .name = #_item,                         \
+        .size = sizeof(_type), .align = 1,                      \
+        is_signed_type(_type), .filter_type = _filter_type },
+
 #undef __field
 #define __field(_type, _item) __field_ext(_type, _item, FILTER_OTHER)
 
@@ -94,6 +104,9 @@ static void __always_unused ____ftrace_check_##name(void) \
 #undef __field_desc
 #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
 
+#undef __field_packed
+#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
+
 #undef __array
 #define __array(_type, _item, _len) {                                   \
         .type = #_type"["__stringify(_len)"]", .name = #_item,          \
@@ -129,6 +142,9 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \
 #undef __field_desc
 #define __field_desc(type, container, item)
 
+#undef __field_packed
+#define __field_packed(type, container, item)
+
 #undef __array
 #define __array(type, item, len)
 
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -42,7 +42,7 @@ static int allocate_ftrace_ops(struct trace_array *tr)
         if (!ops)
                 return -ENOMEM;
 
-        /* Currently only the non stack verision is supported */
+        /* Currently only the non stack version is supported */
         ops->func = function_trace_call;
         ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
 
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1222,7 +1222,7 @@ fetch_store_strlen(unsigned long addr)
 #endif
 
         do {
-                ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
+                ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
                 len++;
         } while (c && ret == 0 && len < MAX_STRING_SIZE);
 
@@ -1290,7 +1290,7 @@ probe_mem_read_user(void *dest, void *src, size_t size)
 {
         const void __user *uaddr = (__force const void __user *)src;
 
-        return probe_user_read(dest, uaddr, size);
+        return copy_from_user_nofault(dest, uaddr, size);
 }
 
 static nokprobe_inline int
@@ -1300,7 +1300,7 @@ probe_mem_read(void *dest, void *src, size_t size)
         if ((unsigned long)src < TASK_SIZE)
                 return probe_mem_read_user(dest, src, size);
 #endif
-        return probe_kernel_read(dest, src, size);
+        return copy_from_kernel_nofault(dest, src, size);
 }
 
 /* Note that we don't verify it, since the code does not come from user space */
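probe_mem_read() also shows the dispatch tracing uses on architectures whose user and kernel address ranges do not overlap: anything below TASK_SIZE is treated as a user pointer and read with the user-space nofault helper, everything else with the kernel one. A condensed kernel-style sketch of that dispatch (illustrative, not independently buildable; the guarding config option is elided here as it is in the hunk's context):

    /* Sketch of the user/kernel dispatch above; same helpers, condensed. */
    static int read_any(void *dest, void *src, size_t size)
    {
            /* Where user and kernel ranges are disjoint, the address
             * alone tells us which accessor is safe to use. */
            if ((unsigned long)src < TASK_SIZE)
                    return copy_from_user_nofault(dest,
                                    (__force const void __user *)src, size);
            return copy_from_kernel_nofault(dest, src, size);
    }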
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -639,8 +639,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
                 ret = -EINVAL;
                 goto fail;
         }
-        if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) ||
-             parg->count) {
+        if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM ||
+             code->op == FETCH_OP_DATA) || parg->count) {
                 /*
                  * IMM, DATA and COMM is pointing actual address, those
                  * must be kept, and if parg->count != 0, this is an
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -236,7 +236,7 @@ struct trace_probe_event {
         struct trace_event_call         call;
         struct list_head                files;
         struct list_head                probes;
-        struct trace_uprobe_filter      filter[0];
+        struct trace_uprobe_filter      filter[];
 };
 
 struct trace_probe {
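The filter[0] to filter[] change is part of the tree-wide conversion from the old GCC zero-length-array idiom to C99 flexible array members, which tells the compiler (and bounds checkers) that the trailing array is intentionally unbounded. Allocation is unchanged: the array's storage is appended to the struct. A small userspace sketch, with calloc standing in for the kernel's kzalloc(struct_size(...)):

    #include <stdio.h>
    #include <stdlib.h>

    struct filter { int cpu; };

    struct probe_event {
            int nr_filters;
            struct filter filter[];     /* C99 flexible array member */
    };

    int main(void)
    {
            int n = 4;
            /* kernel: kzalloc(struct_size(pe, filter, n), GFP_KERNEL) */
            struct probe_event *pe =
                    calloc(1, sizeof(*pe) + n * sizeof(pe->filter[0]));

            if (!pe)
                    return 1;
            pe->nr_filters = n;
            pe->filter[3].cpu = 7;      /* valid: storage allocated above */
            printf("filter[3].cpu = %d\n", pe->filter[3].cpu);
            free(pe);
            return 0;
    }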