Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Three trivial overlapping conflicts.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 2019-05-02 22:14:21 -04:00
167 changed files with 1378 additions and 730 deletions

kernel/bpf/verifier.c

@@ -4349,15 +4349,35 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	return 0;
 }
 
+static void __find_good_pkt_pointers(struct bpf_func_state *state,
+				     struct bpf_reg_state *dst_reg,
+				     enum bpf_reg_type type, u16 new_range)
+{
+	struct bpf_reg_state *reg;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++) {
+		reg = &state->regs[i];
+		if (reg->type == type && reg->id == dst_reg->id)
+			/* keep the maximum range already checked */
+			reg->range = max(reg->range, new_range);
+	}
+
+	bpf_for_each_spilled_reg(i, state, reg) {
+		if (!reg)
+			continue;
+		if (reg->type == type && reg->id == dst_reg->id)
+			reg->range = max(reg->range, new_range);
+	}
+}
+
 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 				   struct bpf_reg_state *dst_reg,
 				   enum bpf_reg_type type,
 				   bool range_right_open)
 {
-	struct bpf_func_state *state = vstate->frame[vstate->curframe];
-	struct bpf_reg_state *regs = state->regs, *reg;
 	u16 new_range;
-	int i, j;
+	int i;
 
 	if (dst_reg->off < 0 ||
 	    (dst_reg->off == 0 && range_right_open))
@@ -4422,20 +4442,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 	 * the range won't allow anything.
 	 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
 	 */
-	for (i = 0; i < MAX_BPF_REG; i++)
-		if (regs[i].type == type && regs[i].id == dst_reg->id)
-			/* keep the maximum range already checked */
-			regs[i].range = max(regs[i].range, new_range);
-
-	for (j = 0; j <= vstate->curframe; j++) {
-		state = vstate->frame[j];
-		bpf_for_each_spilled_reg(i, state, reg) {
-			if (!reg)
-				continue;
-			if (reg->type == type && reg->id == dst_reg->id)
-				reg->range = max(reg->range, new_range);
-		}
-	}
+	for (i = 0; i <= vstate->curframe; i++)
+		__find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
+					 new_range);
 }
 
 /* compute branch direction of the expression "if (reg opcode val) goto target;"
@@ -4909,6 +4918,22 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 	}
 }
 
+static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
+				    bool is_null)
+{
+	struct bpf_reg_state *reg;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++)
+		mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
+
+	bpf_for_each_spilled_reg(i, state, reg) {
+		if (!reg)
+			continue;
+		mark_ptr_or_null_reg(state, reg, id, is_null);
+	}
+}
+
 /* The logic is similar to find_good_pkt_pointers(), both could eventually
  * be folded together at some point.
  */
@@ -4916,10 +4941,10 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 			  bool is_null)
 {
 	struct bpf_func_state *state = vstate->frame[vstate->curframe];
-	struct bpf_reg_state *reg, *regs = state->regs;
+	struct bpf_reg_state *regs = state->regs;
 	u32 ref_obj_id = regs[regno].ref_obj_id;
 	u32 id = regs[regno].id;
-	int i, j;
+	int i;
 
 	if (ref_obj_id && ref_obj_id == id && is_null)
 		/* regs[regno] is in the " == NULL" branch.
@@ -4928,17 +4953,8 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 	 */
 	WARN_ON_ONCE(release_reference_state(state, id));
 
-	for (i = 0; i < MAX_BPF_REG; i++)
-		mark_ptr_or_null_reg(state, &regs[i], id, is_null);
-
-	for (j = 0; j <= vstate->curframe; j++) {
-		state = vstate->frame[j];
-		bpf_for_each_spilled_reg(i, state, reg) {
-			if (!reg)
-				continue;
-			mark_ptr_or_null_reg(state, reg, id, is_null);
-		}
-	}
+	for (i = 0; i <= vstate->curframe; i++)
+		__mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
 }
 
 static bool try_match_pkt_pointers(const struct bpf_insn *insn,
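
For illustration, a minimal userspace mock (not kernel code) of the pattern the two new helpers enable: walking every call frame up to curframe instead of only the innermost one, so registers sharing the checked pointer's id are updated in caller frames too. Struct and function names below are invented stand-ins for bpf_verifier_state/bpf_func_state/bpf_reg_state:

#include <stdio.h>

#define MAX_BPF_REG 11	/* mirrors the kernel constant (r0-r10) */

/* Toy stand-ins; field names are invented for this sketch. */
struct mock_reg { int type, id; unsigned short range; };
struct mock_frame { struct mock_reg regs[MAX_BPF_REG]; };
struct mock_state { int curframe; struct mock_frame *frame[4]; };

/* Per-frame helper, shaped like __find_good_pkt_pointers() above. */
static void mock_update_frame(struct mock_frame *f, int type, int id,
			      unsigned short new_range)
{
	for (int i = 0; i < MAX_BPF_REG; i++) {
		struct mock_reg *reg = &f->regs[i];
		if (reg->type == type && reg->id == id)
			reg->range = reg->range > new_range ? reg->range : new_range;
	}
}

int main(void)
{
	struct mock_frame caller = { 0 }, callee = { 0 };
	struct mock_state st = { .curframe = 1, .frame = { &caller, &callee } };

	/* The same packet pointer (id 7) is visible in both frames. */
	caller.regs[6] = (struct mock_reg){ .type = 1, .id = 7 };
	callee.regs[1] = (struct mock_reg){ .type = 1, .id = 7 };

	/* Walk every frame up to curframe, as the patched loop does. */
	for (int i = 0; i <= st.curframe; i++)
		mock_update_frame(st.frame[i], 1, 7, 64);

	printf("caller r6 range=%u, callee r1 range=%u\n",
	       caller.regs[6].range, callee.regs[1].range);	/* both 64 */
	return 0;
}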

kernel/sched/fair.c

@@ -2007,6 +2007,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
 	if (p->last_task_numa_placement) {
 		delta = runtime - p->last_sum_exec_runtime;
 		*period = now - p->last_task_numa_placement;
+
+		/* Avoid time going backwards, prevent potential divide error: */
+		if (unlikely((s64)*period < 0))
+			*period = 0;
 	} else {
 		delta = p->se.avg.load_sum;
 		*period = LOAD_AVG_MAX;
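
The cast matters because *period is u64: if now is behind last_task_numa_placement, the unsigned subtraction wraps to a huge positive value instead of going negative, and a later division can misbehave. A standalone sketch of the guard (values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t now = 1000, last = 1500;	/* clock appears to go backwards */
	uint64_t period = now - last;		/* u64 wraps to ~2^64, not -500 */

	if ((int64_t)period < 0)		/* the (s64) cast recovers the sign */
		period = 0;

	printf("period = %llu\n", (unsigned long long)period);	/* prints 0 */
	return 0;
}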

kernel/seccomp.c

@@ -502,7 +502,10 @@ out:
  *
  * Caller must be holding current->sighand->siglock lock.
  *
- * Returns 0 on success, -ve on error.
+ * Returns 0 on success, -ve on error, or
+ *   - in TSYNC mode: the pid of a thread which was either not in the correct
+ *     seccomp mode or did not have an ancestral seccomp filter
+ *   - in NEW_LISTENER mode: the fd of the new listener
  */
 static long seccomp_attach_filter(unsigned int flags,
 				  struct seccomp_filter *filter)
@@ -1258,6 +1261,16 @@ static long seccomp_set_mode_filter(unsigned int flags,
 	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
 		return -EINVAL;
 
+	/*
+	 * In the successful case, NEW_LISTENER returns the new listener fd.
+	 * But in the failure case, TSYNC returns the thread that died. If you
+	 * combine these two flags, there's no way to tell whether something
+	 * succeeded or failed. So, let's disallow this combination.
+	 */
+	if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
+	    (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER))
+		return -EINVAL;
+
 	/* Prepare the new filter before holding any locks. */
 	prepared = seccomp_prepare_user_filter(filter);
 	if (IS_ERR(prepared))
@@ -1304,7 +1317,7 @@ out:
 	mutex_unlock(&current->signal->cred_guard_mutex);
 out_put_fd:
 	if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
-		if (ret < 0) {
+		if (ret) {
 			listener_f->private_data = NULL;
 			fput(listener_f);
 			put_unused_fd(listener);
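
A minimal userspace sketch of what the new check rejects, assuming a kernel and uapi headers recent enough to define SECCOMP_FILTER_FLAG_NEW_LISTENER; after this patch the combined-flags call fails with EINVAL instead of returning an ambiguous value:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	/* Trivial allow-everything filter, just to make the call well-formed. */
	struct sock_filter allow = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
	struct sock_fprog prog = { .len = 1, .filter = &allow };

	/* Required before installing a filter without CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		perror("prctl");

	/* TSYNC | NEW_LISTENER is the ambiguous combination now disallowed. */
	long ret = syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER,
			   SECCOMP_FILTER_FLAG_TSYNC |
			   SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
	if (ret == -1)
		printf("rejected as expected: %s\n", strerror(errno)); /* EINVAL */
	return 0;
}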

kernel/trace/ring_buffer.c

@@ -762,7 +762,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 
 	preempt_disable_notrace();
 	time = rb_time_stamp(buffer);
-	preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return time;
 }
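
The one-liner matters because the _no_resched variants drop the preempt count without re-checking whether a reschedule became pending, which outside the scheduler can leave a wakeup waiting on some later, unrelated preemption point. A toy userspace model of that difference (not kernel code; all names invented):

#include <stdbool.h>
#include <stdio.h>

static int preempt_count;
static bool need_resched;

static void schedule_mock(void) { need_resched = false; puts("rescheduled"); }

static void preempt_disable_mock(void) { preempt_count++; }

static void preempt_enable_mock(void)
{
	/* like preempt_enable_notrace(): re-check for a pending reschedule */
	if (--preempt_count == 0 && need_resched)
		schedule_mock();
}

static void preempt_enable_no_resched_mock(void)
{
	--preempt_count;	/* skips the need_resched check entirely */
}

int main(void)
{
	need_resched = true;

	preempt_disable_mock();
	preempt_enable_no_resched_mock();	/* bug: wakeup left pending */
	printf("pending resched? %d\n", need_resched);	/* 1 */

	preempt_disable_mock();
	preempt_enable_mock();			/* fixed: reschedules now */
	printf("pending resched? %d\n", need_resched);	/* 0 */
	return 0;
}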

kernel/trace/trace.c

@@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 	 * not modified.
 	 */
 	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-	if (!pid_list)
+	if (!pid_list) {
+		trace_parser_put(&parser);
 		return -ENOMEM;
+	}
 
 	pid_list->pid_max = READ_ONCE(pid_max);
 
@@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
 	if (!pid_list->pids) {
 		trace_parser_put(&parser);
+		kfree(pid_list);
 		return -ENOMEM;
 	}
 
@@ -7025,19 +7028,23 @@ struct buffer_ref {
 	struct ring_buffer	*buffer;
 	void			*page;
 	int			cpu;
-	int			ref;
+	refcount_t		refcount;
 };
 
+static void buffer_ref_release(struct buffer_ref *ref)
+{
+	if (!refcount_dec_and_test(&ref->refcount))
+		return;
+	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+	kfree(ref);
+}
+
 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
 				    struct pipe_buffer *buf)
 {
 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-	if (--ref->ref)
-		return;
-
-	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-	kfree(ref);
+	buffer_ref_release(ref);
 	buf->private = 0;
 }
 
@@ -7046,10 +7053,10 @@ static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 {
 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-	if (ref->ref > INT_MAX/2)
+	if (refcount_read(&ref->refcount) > INT_MAX/2)
 		return false;
 
-	ref->ref++;
+	refcount_inc(&ref->refcount);
 	return true;
 }
 
@@ -7057,7 +7064,7 @@ static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.confirm		= generic_pipe_buf_confirm,
 	.release		= buffer_pipe_buf_release,
-	.steal			= generic_pipe_buf_steal,
+	.steal			= generic_pipe_buf_nosteal,
 	.get			= buffer_pipe_buf_get,
 };
 
@@ -7070,11 +7077,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 	struct buffer_ref *ref =
 		(struct buffer_ref *)spd->partial[i].private;
 
-	if (--ref->ref)
-		return;
-
-	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-	kfree(ref);
+	buffer_ref_release(ref);
 	spd->partial[i].private = 0;
 }
 
@@ -7129,7 +7132,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			break;
 		}
 
-		ref->ref = 1;
+		refcount_set(&ref->refcount, 1);
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 		if (IS_ERR(ref->page)) {
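
For illustration, a userspace mock of the converted lifetime, with a plain int standing in for refcount_t (the real kernel type additionally saturates and warns on overflow rather than wrapping). It shows the single shared drop path and the INT_MAX/2 guard that buffer_pipe_buf_get() keeps; all names here are invented:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer_ref_mock { int refcount; };	/* stands in for refcount_t */

/* Mirrors buffer_pipe_buf_get(): refuse new references well before the
 * counter could wrap past INT_MAX into a bogus negative value. */
static bool mock_buf_get(struct buffer_ref_mock *ref)
{
	if (ref->refcount > INT_MAX / 2)
		return false;
	ref->refcount++;
	return true;
}

/* Mirrors buffer_ref_release(): one shared drop path instead of two
 * duplicated ones, so the free logic cannot diverge again. */
static void mock_buf_release(struct buffer_ref_mock *ref)
{
	if (--ref->refcount)
		return;
	printf("last reference gone, freeing page\n");
	free(ref);
}

int main(void)
{
	struct buffer_ref_mock *ref = calloc(1, sizeof(*ref));

	ref->refcount = 1;		/* like refcount_set(&ref->refcount, 1) */
	if (mock_buf_get(ref))		/* pipe takes a second reference */
		mock_buf_release(ref);	/* drop it again: no free yet */
	mock_buf_release(ref);		/* last drop frees the page */
	return 0;
}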