Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Move the bpf verifier trace check into the new switch statement in HEAD.

Resolve the overlapping changes in hinic, where bug fixes overlap the
addition of VF support.

Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -466,7 +466,6 @@ config PROFILE_ANNOTATED_BRANCHES
 config PROFILE_ALL_BRANCHES
 	bool "Profile all if conditionals" if !FORTIFY_SOURCE
 	select TRACE_BRANCH_PROFILING
-	imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED # avoid false positives
 	help
 	  This tracer profiles all branch conditions. Every if ()
 	  taken in the kernel is recorded whether it hit or miss.
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -326,17 +326,15 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 
 /*
  * Only limited trace_printk() conversion specifiers allowed:
- * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
+ * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
  */
 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 	   u64, arg2, u64, arg3)
 {
+	int i, mod[3] = {}, fmt_cnt = 0;
+	char buf[64], fmt_ptype;
+	void *unsafe_ptr = NULL;
 	bool str_seen = false;
-	int mod[3] = {};
-	int fmt_cnt = 0;
-	u64 unsafe_addr;
-	char buf[64];
-	int i;
 
 	/*
 	 * bpf_check()->check_func_arg()->check_stack_boundary()
@@ -362,40 +360,71 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 		if (fmt[i] == 'l') {
 			mod[fmt_cnt]++;
 			i++;
-		} else if (fmt[i] == 'p' || fmt[i] == 's') {
+		} else if (fmt[i] == 'p') {
 			mod[fmt_cnt]++;
+			if ((fmt[i + 1] == 'k' ||
+			     fmt[i + 1] == 'u') &&
+			    fmt[i + 2] == 's') {
+				fmt_ptype = fmt[i + 1];
+				i += 2;
+				goto fmt_str;
+			}
+
 			/* disallow any further format extensions */
 			if (fmt[i + 1] != 0 &&
 			    !isspace(fmt[i + 1]) &&
 			    !ispunct(fmt[i + 1]))
 				return -EINVAL;
-			fmt_cnt++;
-			if (fmt[i] == 's') {
-				if (str_seen)
-					/* allow only one '%s' per fmt string */
-					return -EINVAL;
-				str_seen = true;
-
-				switch (fmt_cnt) {
-				case 1:
-					unsafe_addr = arg1;
-					arg1 = (long) buf;
-					break;
-				case 2:
-					unsafe_addr = arg2;
-					arg2 = (long) buf;
-					break;
-				case 3:
-					unsafe_addr = arg3;
-					arg3 = (long) buf;
-					break;
-				}
-				buf[0] = 0;
-				strncpy_from_unsafe(buf,
-						    (void *) (long) unsafe_addr,
-						    sizeof(buf));
-			}
-			continue;
+
+			goto fmt_next;
+		} else if (fmt[i] == 's') {
+			mod[fmt_cnt]++;
+			fmt_ptype = fmt[i];
+fmt_str:
+			if (str_seen)
+				/* allow only one '%s' per fmt string */
+				return -EINVAL;
+			str_seen = true;
+
+			if (fmt[i + 1] != 0 &&
+			    !isspace(fmt[i + 1]) &&
+			    !ispunct(fmt[i + 1]))
+				return -EINVAL;
+
+			switch (fmt_cnt) {
+			case 0:
+				unsafe_ptr = (void *)(long)arg1;
+				arg1 = (long)buf;
+				break;
+			case 1:
+				unsafe_ptr = (void *)(long)arg2;
+				arg2 = (long)buf;
+				break;
+			case 2:
+				unsafe_ptr = (void *)(long)arg3;
+				arg3 = (long)buf;
+				break;
+			}
+
+			buf[0] = 0;
+			switch (fmt_ptype) {
+			case 's':
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+				strncpy_from_unsafe(buf, unsafe_ptr,
+						    sizeof(buf));
+				break;
+#endif
+			case 'k':
+				strncpy_from_unsafe_strict(buf, unsafe_ptr,
+							   sizeof(buf));
+				break;
+			case 'u':
+				strncpy_from_unsafe_user(buf,
+					(__force void __user *)unsafe_ptr,
+					 sizeof(buf));
+				break;
+			}
+			goto fmt_next;
 		}
 
 		if (fmt[i] == 'l') {
@@ -406,6 +435,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 		if (fmt[i] != 'i' && fmt[i] != 'd' &&
 		    fmt[i] != 'u' && fmt[i] != 'x')
 			return -EINVAL;
+fmt_next:
 		fmt_cnt++;
 	}
 
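Context for the two bpf_trace_printk() hunks above: %s is now routed through the same string-copy path as the new %pks (kernel string) and %pus (user string) specifiers, with the copy routine selected by fmt_ptype. From a BPF program the new specifiers are used like any other trace_printk conversion. A minimal sketch, assuming a libbpf-style build; the probed function and its argument layout are illustrative assumptions, not part of this commit:

	// SPDX-License-Identifier: GPL-2.0
	/* Sketch only: print a user-space string with the new %pus specifier. */
	#include <linux/bpf.h>
	#include <linux/ptrace.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	SEC("kprobe/do_sys_openat2")
	int trace_open(struct pt_regs *ctx)
	{
		/* assumption: the filename is the second argument */
		const char *uname = (const char *)PT_REGS_PARM2(ctx);
		char fmt[] = "openat: %pus\n";

		bpf_trace_printk(fmt, sizeof(fmt), uname);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";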
@@ -1036,14 +1066,16 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_probe_read_user_proto;
 	case BPF_FUNC_probe_read_kernel:
 		return &bpf_probe_read_kernel_proto;
-	case BPF_FUNC_probe_read:
-		return &bpf_probe_read_compat_proto;
 	case BPF_FUNC_probe_read_user_str:
 		return &bpf_probe_read_user_str_proto;
 	case BPF_FUNC_probe_read_kernel_str:
 		return &bpf_probe_read_kernel_str_proto;
-	case BPF_FUNC_probe_read_str:
-		return &bpf_probe_read_compat_str_proto;
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+	case BPF_FUNC_probe_read:
+		return &bpf_probe_read_compat_proto;
+	case BPF_FUNC_probe_read_str:
+		return &bpf_probe_read_compat_str_proto;
+#endif
 #ifdef CONFIG_CGROUPS
 	case BPF_FUNC_get_current_cgroup_id:
 		return &bpf_get_current_cgroup_id_proto;
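The #ifdef added above means the compat helpers bpf_probe_read() and bpf_probe_read_str() are only offered where kernel and user addresses cannot overlap; elsewhere the verifier rejects programs that still use them. The portable pattern is to name the address space explicitly. A hedged sketch of the caller side (the task/field access is illustrative and assumes a vmlinux.h with the task_struct layout):

	char comm[16];
	struct task_struct *task = (void *)bpf_get_current_task();

	/* explicit kernel-space read: loads on every architecture */
	bpf_probe_read_kernel(comm, sizeof(comm), &task->comm);

	/* the old bpf_probe_read(comm, sizeof(comm), &task->comm) is now
	 * rejected where CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	 * is not set */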
--- a/kernel/trace/ftrace_internal.h
+++ b/kernel/trace/ftrace_internal.h
@@ -4,28 +4,6 @@
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw_check() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw_check() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
-#define do_for_each_ftrace_op(op, list)			\
-	op = rcu_dereference_raw_check(list);		\
-	do
-
-/*
- * Optimized for just a single item in the list (as that is the normal case).
- */
-#define while_for_each_ftrace_op(op)			\
-	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
-	       unlikely((op) != &ftrace_list_end))
-
 extern struct ftrace_ops __rcu *ftrace_ops_list;
 extern struct ftrace_ops ftrace_list_end;
 extern struct mutex ftrace_lock;
 extern struct ftrace_ops global_ops;
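For reference, the two macros removed above implement an RCU-safe walk of the global ftrace_ops list. Their usage idiom in the ftrace core reads as below (the callback invocation is an illustrative sketch, not a line from this commit):

	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->func)
			op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);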
--- a/kernel/trace/preemptirq_delay_test.c
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -16,6 +16,7 @@
 #include <linux/printk.h>
 #include <linux/string.h>
 #include <linux/sysfs.h>
+#include <linux/completion.h>
 
 static ulong delay = 100;
 static char test_mode[12] = "irq";
@@ -28,6 +29,8 @@ MODULE_PARM_DESC(delay, "Period in microseconds (100 us default)");
 MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt, irq, or alternate (default irq)");
 MODULE_PARM_DESC(burst_size, "The size of a burst (default 1)");
 
+static struct completion done;
+
 #define MIN(x, y) ((x) < (y) ? (x) : (y))
 
 static void busy_wait(ulong time)
@@ -113,22 +116,47 @@ static int preemptirq_delay_run(void *data)
 
 	for (i = 0; i < s; i++)
 		(testfuncs[i])(i);
+
+	complete(&done);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+
+	__set_current_state(TASK_RUNNING);
+
 	return 0;
 }
 
-static struct task_struct *preemptirq_start_test(void)
+static int preemptirq_run_test(void)
 {
+	struct task_struct *task;
 	char task_name[50];
 
+	init_completion(&done);
+
 	snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
-	return kthread_run(preemptirq_delay_run, NULL, task_name);
+	task = kthread_run(preemptirq_delay_run, NULL, task_name);
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+	if (task) {
+		wait_for_completion(&done);
+		kthread_stop(task);
+	}
+	return 0;
 }
 
 
 static ssize_t trigger_store(struct kobject *kobj, struct kobj_attribute *attr,
 			 const char *buf, size_t count)
 {
-	preemptirq_start_test();
+	ssize_t ret;
+
+	ret = preemptirq_run_test();
+	if (ret)
+		return ret;
 	return count;
 }
 
@@ -148,11 +176,9 @@ static struct kobject *preemptirq_delay_kobj;
 
 static int __init preemptirq_delay_init(void)
 {
-	struct task_struct *test_task;
 	int retval;
 
-	test_task = preemptirq_start_test();
-	retval = PTR_ERR_OR_ZERO(test_task);
+	retval = preemptirq_run_test();
 	if (retval != 0)
 		return retval;
 
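The three preemptirq_delay_test hunks above convert a fire-and-forget kthread into the usual "run once, wait, reap" shape: the worker signals a completion when its payload is done, then sleeps until kthread_stop(), so both the sysfs trigger and module init can block until the test has really finished. The same pattern in isolation, as a minimal sketch with illustrative names:

	#include <linux/completion.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static DECLARE_COMPLETION(work_done);

	static int worker(void *data)
	{
		/* ... one-shot payload ... */
		complete(&work_done);

		/* park until the creator reaps us with kthread_stop() */
		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop()) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	static int run_once(void)
	{
		struct task_struct *task = kthread_run(worker, NULL, "oneshot");

		if (IS_ERR(task))
			return PTR_ERR(task);
		wait_for_completion(&work_done);
		return kthread_stop(task); /* collects worker()'s exit code */
	}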
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -193,7 +193,7 @@ rb_event_length(struct ring_buffer_event *event)
 	case RINGBUF_TYPE_DATA:
 		return rb_event_data_length(event);
 	default:
-		BUG();
+		WARN_ON_ONCE(1);
 	}
 	/* not hit */
 	return 0;
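This hunk and the ring-buffer hunks that follow all make the same substitution: a "cannot happen" default: arm that used to BUG() now warns (once, or via RB_WARN_ON(), which additionally disables the buffer) and falls through to a value the caller can survive. The shape of the pattern, as a standalone sketch with illustrative types:

	#include <linux/bug.h>

	enum ev_type { EV_PADDING, EV_DATA };

	struct event {
		enum ev_type type;
		unsigned int len;
	};

	static unsigned int event_length(const struct event *e)
	{
		switch (e->type) {
		case EV_DATA:
			return e->len;
		default:
			WARN_ON_ONCE(1); /* was BUG(): would panic the box */
		}
		return 0; /* safe fallback the caller tolerates */
	}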
@@ -249,7 +249,7 @@ rb_event_data(struct ring_buffer_event *event)
 {
 	if (extended_time(event))
 		event = skip_time_extend(event);
-	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
+	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 	/* If length is in len field, then array[0] has the data */
 	if (event->type_len)
 		return (void *)&event->array[0];
@@ -3727,7 +3727,7 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		return;
 
 	default:
-		BUG();
+		RB_WARN_ON(cpu_buffer, 1);
 	}
 	return;
 }
@@ -3757,7 +3757,7 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
 		return;
 
 	default:
-		BUG();
+		RB_WARN_ON(iter->cpu_buffer, 1);
 	}
 	return;
 }
@@ -4020,7 +4020,7 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
 		return event;
 
 	default:
-		BUG();
+		RB_WARN_ON(cpu_buffer, 1);
 	}
 
 	return NULL;
@@ -4034,7 +4034,6 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
-	bool failed = false;
 
 	if (ts)
 		*ts = 0;
@@ -4056,19 +4055,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 		return NULL;
 
 	/*
-	 * We repeat when a time extend is encountered or we hit
-	 * the end of the page. Since the time extend is always attached
-	 * to a data event, we should never loop more than three times.
-	 * Once for going to next page, once on time extend, and
-	 * finally once to get the event.
-	 * We should never hit the following condition more than thrice,
-	 * unless the buffer is very small, and there's a writer
-	 * that is causing the reader to fail getting an event.
+	 * As the writer can mess with what the iterator is trying
+	 * to read, just give up if we fail to get an event after
+	 * three tries. The iterator is not as reliable when reading
+	 * the ring buffer with an active write as the consumer is.
+	 * Do not warn when three failures are reached.
 	 */
-	if (++nr_loops > 3) {
-		RB_WARN_ON(cpu_buffer, !failed);
+	if (++nr_loops > 3)
 		return NULL;
-	}
 
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
@@ -4079,10 +4073,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	}
 
 	event = rb_iter_head_event(iter);
-	if (!event) {
-		failed = true;
+	if (!event)
 		goto again;
-	}
 
 	switch (event->type_len) {
 	case RINGBUF_TYPE_PADDING:
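The two rb_iter_peek() hunks drop the failed bookkeeping because racing a live writer is an expected condition for the iterator, not a bug worth warning about: it simply retries a bounded number of times and then gives up. Condensed to its skeleton (a paraphrase of the code above, not a drop-in replacement):

	static struct ring_buffer_event *
	iter_peek_skeleton(struct ring_buffer_iter *iter)
	{
		struct ring_buffer_event *event;
		int nr_loops = 0;

	 again:
		if (++nr_loops > 3)	/* no warning: losing the race is normal */
			return NULL;

		event = rb_iter_head_event(iter);
		if (!event)		/* a writer moved the page under us */
			goto again;

		return event;
	}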
@@ -4117,7 +4109,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 		return event;
 
 	default:
-		BUG();
+		RB_WARN_ON(cpu_buffer, 1);
 	}
 
 	return NULL;
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -947,7 +947,8 @@ int __trace_bputs(unsigned long ip, const char *str)
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
+static void tracing_snapshot_instance_cond(struct trace_array *tr,
+					   void *cond_data)
 {
 	struct tracer *tracer = tr->current_trace;
 	unsigned long flags;
@@ -8525,6 +8526,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	 */
 	allocate_snapshot = false;
 #endif
+
+	/*
+	 * Because of some magic with the way alloc_percpu() works on
+	 * x86_64, we need to synchronize the pgd of all the tables,
+	 * otherwise the trace events that happen in x86_64 page fault
+	 * handlers can't cope with accessing the chance that a
+	 * alloc_percpu()'d memory might be touched in the page fault trace
+	 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
+	 * calls in tracing, because something might get triggered within a
+	 * page fault trace event!
+	 */
+	vmalloc_sync_mappings();
+
 	return 0;
 }
 
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -95,23 +95,19 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
 	struct xbc_node *anode;
 	char buf[MAX_BUF_LEN];
 	const char *val;
-	int ret;
-
-	kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
-
-	ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
-	if (ret)
-		return ret;
+	int ret = 0;
 
 	xbc_node_for_each_array_value(node, "probes", anode, val) {
-		ret = kprobe_event_add_field(&cmd, val);
-		if (ret)
-			return ret;
-	}
+		kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
 
-	ret = kprobe_event_gen_cmd_end(&cmd);
-	if (ret)
-		pr_err("Failed to add probe: %s\n", buf);
+		ret = kprobe_event_gen_cmd_start(&cmd, event, val);
+		if (ret)
+			break;
+
+		ret = kprobe_event_gen_cmd_end(&cmd);
+		if (ret)
+			pr_err("Failed to add probe: %s\n", buf);
+	}
 
 	return ret;
 }
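The trace_boot.c fix matters because each kprobe_event_gen_cmd_start()/kprobe_event_gen_cmd_end() pair builds one complete probe definition, so cmd_init/start/end must run once per probe rather than once per list. Correct single-probe usage of the API, sketched with illustrative event, location, and fetch-arg strings:

	#include <linux/trace_events.h>

	static int add_one_probe(void)
	{
		struct dynevent_cmd cmd;
		char buf[MAX_DYNEVENT_CMD_LEN];
		int ret;

		kprobe_event_cmd_init(&cmd, buf, sizeof(buf));

		/* event name, probe location, then fetch args (illustrative) */
		ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
						 "dfd=%ax", "filename=%dx");
		if (ret)
			return ret;

		return kprobe_event_gen_cmd_end(&cmd);
	}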
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -453,7 +453,7 @@ static bool __within_notrace_func(unsigned long addr)
 
 static bool within_notrace_func(struct trace_kprobe *tk)
 {
-	unsigned long addr = addr = trace_kprobe_address(tk);
+	unsigned long addr = trace_kprobe_address(tk);
 	char symname[KSYM_NAME_LEN], *p;
 
 	if (!__within_notrace_func(addr))
@@ -940,6 +940,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
  * complete command or only the first part of it; in the latter case,
  * kprobe_event_add_fields() can be used to add more fields following this.
  *
+ * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
+ * returns -EINVAL if @loc == NULL.
+ *
  * Return: 0 if successful, error otherwise.
  */
 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
@@ -953,6 +956,9 @@ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
 	if (cmd->type != DYNEVENT_TYPE_KPROBE)
 		return -EINVAL;
 
+	if (!loc)
+		return -EINVAL;
+
 	if (kretprobe)
 		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
 	else
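With the @loc check above in place, a caller that passes no probe location now fails fast instead of building a malformed command; e.g. (illustrative):

	/* loc == NULL is rejected before any command text is generated */
	ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", NULL);
	/* ret == -EINVAL */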