Merge branch 'linus' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state)
 				reg->map_ptr->key_size,
 				reg->map_ptr->value_size);
 		if (reg->min_value != BPF_REGISTER_MIN_RANGE)
-			verbose(",min_value=%llu",
-				(unsigned long long)reg->min_value);
+			verbose(",min_value=%lld",
+				(long long)reg->min_value);
 		if (reg->max_value != BPF_REGISTER_MAX_RANGE)
 			verbose(",max_value=%llu",
 				(unsigned long long)reg->max_value);
@@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 		 * index'es we need to make sure that whatever we use
 		 * will have a set floor within our range.
 		 */
-		if ((s64)reg->min_value < 0) {
+		if (reg->min_value < 0) {
 			verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
 				regno);
 			return -EACCES;
@@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
 {
 	if (reg->max_value > BPF_REGISTER_MAX_RANGE)
 		reg->max_value = BPF_REGISTER_MAX_RANGE;
-	if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE)
+	if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
+	    reg->min_value > BPF_REGISTER_MAX_RANGE)
 		reg->min_value = BPF_REGISTER_MIN_RANGE;
 }
 
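
A quick userspace illustration of the signedness issue the three hunks above address: min_value can legitimately be negative, so printing or comparing it as unsigned makes a negative floor look like a huge positive one. This is a standalone sketch, not kernel code; the names and values are illustrative only.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t min_value = -16;	/* a legitimately negative floor */

	/* The old %llu format makes a negative floor look like a huge
	 * positive number in verifier dumps: */
	printf("as %%llu: %llu\n", (unsigned long long)min_value);

	/* The new %lld format shows what the verifier actually tracks: */
	printf("as %%lld: %lld\n", (long long)min_value);

	/* And with the value handled as signed throughout, a plain signed
	 * comparison replaces the scattered (s64) casts: */
	if (min_value < 0)
		printf("negative floor -> reject the map access\n");
	return 0;
}
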
@@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 			       struct bpf_insn *insn)
 {
 	struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
-	u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE;
+	s64 min_val = BPF_REGISTER_MIN_RANGE;
+	u64 max_val = BPF_REGISTER_MAX_RANGE;
 	bool min_set = false, max_set = false;
 	u8 opcode = BPF_OP(insn->code);
 
@@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		return;
 	}
 
+	/* If one of our values was at the end of our ranges then we can't just
+	 * do our normal operations to the register, we need to set the values
+	 * to the min/max since they are undefined.
+	 */
+	if (min_val == BPF_REGISTER_MIN_RANGE)
+		dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+	if (max_val == BPF_REGISTER_MAX_RANGE)
+		dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+
 	switch (opcode) {
 	case BPF_ADD:
-		dst_reg->min_value += min_val;
-		dst_reg->max_value += max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value += min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value += max_val;
 		break;
 	case BPF_SUB:
-		dst_reg->min_value -= min_val;
-		dst_reg->max_value -= max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value -= min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value -= max_val;
 		break;
 	case BPF_MUL:
-		dst_reg->min_value *= min_val;
-		dst_reg->max_value *= max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value *= min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value *= max_val;
 		break;
 	case BPF_AND:
-		/* & is special since it could end up with 0 bits set. */
-		dst_reg->min_value &= min_val;
+		/* Disallow AND'ing of negative numbers, ain't nobody got time
+		 * for that. Otherwise the minimum is 0 and the max is the max
+		 * value we could AND against.
+		 */
+		if (min_val < 0)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+		else
+			dst_reg->min_value = 0;
 		dst_reg->max_value = max_val;
 		break;
 	case BPF_LSH:
@@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		 */
 		if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
 			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-		else
+		else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
 			dst_reg->min_value <<= min_val;
 
 		if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
 			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
-		else
+		else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
 			dst_reg->max_value <<= max_val;
 		break;
 	case BPF_RSH:
-		dst_reg->min_value >>= min_val;
-		dst_reg->max_value >>= max_val;
-		break;
-	case BPF_MOD:
-		/* % is special since it is an unsigned modulus, so the floor
-		 * will always be 0.
+		/* RSH by a negative number is undefined, and the BPF_RSH is an
+		 * unsigned shift, so make the appropriate casts.
 		 */
-		dst_reg->min_value = 0;
-		dst_reg->max_value = max_val - 1;
+		if (min_val < 0 || dst_reg->min_value < 0)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+		else
+			dst_reg->min_value =
+				(u64)(dst_reg->min_value) >> min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value >>= max_val;
 		break;
 	default:
 		reset_reg_range_values(regs, insn->dst_reg);
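
The BPF_RSH rewrite above is the subtle part of this hunk: right-shifting a negative signed value is implementation-defined in C, so the patch shifts the unsigned representation and falls back to the BPF_REGISTER_MIN_RANGE sentinel whenever either value could be negative. A standalone userspace sketch of the difference, with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t min_value = -8;	/* current lower bound of the register */
	int64_t min_val = 2;	/* shift amount from the other operand */

	/* An arithmetic (signed) shift keeps the sign bit: on common
	 * compilers -8 >> 2 == -2, but C leaves the result
	 * implementation-defined, so it is not a bound to rely on: */
	printf("signed shift:   %lld\n", (long long)(min_value >> min_val));

	/* The patched BPF_RSH case shifts the unsigned representation
	 * instead, matching what the eBPF instruction actually does: */
	printf("unsigned shift: %llu\n",
	       (unsigned long long)((uint64_t)min_value >> min_val));
	return 0;
}
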
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -836,6 +836,7 @@ void __noreturn do_exit(long code)
 	 */
 	perf_event_exit_task(tsk);
 
+	sched_autogroup_exit_task(tsk);
 	cgroup_exit(tsk);
 
 	/*
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -45,6 +45,14 @@ enum {
 #define LOCKF_USED_IN_IRQ_READ \
 		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
+/*
+ * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * .data and .bss to fit in required 32MB limit for the kernel. With
+ * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * So, reduce the static allocations for lockdeps related structures so that
+ * everything fits in current required size limit.
+ */
+#ifdef CONFIG_PROVE_LOCKING_SMALL
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
@@ -54,18 +62,24 @@ enum {
  * table (if it's not there yet), and we check it for lock order
  * conflicts and deadlocks.
  */
+#define MAX_LOCKDEP_ENTRIES	16384UL
+#define MAX_LOCKDEP_CHAINS_BITS	15
+#define MAX_STACK_TRACE_ENTRIES	262144UL
+#else
 #define MAX_LOCKDEP_ENTRIES	32768UL
 
 #define MAX_LOCKDEP_CHAINS_BITS	16
-#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
-
-#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
-
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
 #define MAX_STACK_TRACE_ENTRIES	524288UL
+#endif
+
+#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
+
+#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
 
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
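
For scale, the CONFIG_PROVE_LOCKING_SMALL values above halve the largest static lockdep tables. A throwaway userspace program that just evaluates the two configurations side by side; the element counts come from the patch, and no kernel struct sizes are assumed:

#include <stdio.h>

int main(void)
{
	unsigned long small_chains = 1UL << 15;	/* MAX_LOCKDEP_CHAINS_BITS = 15 */
	unsigned long big_chains   = 1UL << 16;	/* MAX_LOCKDEP_CHAINS_BITS = 16 */

	printf("lockdep entries:     %lu vs %lu\n", 16384UL, 32768UL);
	printf("lock chains:         %lu vs %lu\n", small_chains, big_chains);
	printf("chain hlocks (x5):   %lu vs %lu\n",
	       small_chains * 5, big_chains * 5);
	printf("stack trace entries: %lu vs %lu\n", 262144UL, 524288UL);
	return 0;
}
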
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
 	if (tg != &root_task_group)
 		return false;
 
 	/*
-	 * We can only assume the task group can't go away on us if
-	 * autogroup_move_group() can see us on ->thread_group list.
+	 * If we race with autogroup_move_group() the caller can use the old
+	 * value of signal->autogroup but in this case sched_move_task() will
+	 * be called again before autogroup_kref_put().
+	 *
+	 * However, there is no way sched_autogroup_exit_task() could tell us
+	 * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
 	 */
 	if (p->flags & PF_EXITING)
 		return false;
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	return true;
 }
 
+void sched_autogroup_exit_task(struct task_struct *p)
+{
+	/*
+	 * We are going to call exit_notify() and autogroup_move_group() can't
+	 * see this thread after that: we can no longer use signal->autogroup.
+	 * See the PF_EXITING check in task_wants_autogroup().
+	 */
+	sched_move_task(p);
+}
+
 static void
 autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 {
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	}
 
 	p->signal->autogroup = autogroup_kref_get(ag);
-
-	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
+	/*
+	 * We can't avoid sched_move_task() after we changed signal->autogroup,
+	 * this process can already run with task_group() == prev->tg or we can
+	 * race with cgroup code which can read autogroup = prev under rq->lock.
+	 * In the latter case for_each_thread() can not miss a migrating thread,
+	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
+	 * can't be removed from thread list, we hold ->siglock.
+	 *
+	 * If an exiting thread was already removed from thread list we rely on
+	 * sched_autogroup_exit_task().
+	 */
 	for_each_thread(p, t)
 		sched_move_task(t);
-out:
+
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }
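
The PF_EXITING test is the pivot of this fix: an exiting thread may already have been moved back by sched_autogroup_exit_task() and will not be moved again, so its cached signal->autogroup must no longer be trusted. A loose userspace analogue of that pattern; all types and names below are illustrative stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define PF_EXITING 0x4	/* stand-in for the kernel's task flag */

struct autogroup { const char *name; };

struct task {
	unsigned int flags;
	struct autogroup *autogroup;	/* may go stale once exiting */
};

/* Analogue of the patched task_wants_autogroup(): an exiting task has
 * already been handled by sched_autogroup_exit_task() and nothing will
 * move it again, so its cached autogroup must not be used. */
static bool task_wants_autogroup(const struct task *p)
{
	if (p->flags & PF_EXITING)
		return false;
	return p->autogroup != NULL;
}

int main(void)
{
	struct autogroup ag = { "autogroup-17" };
	struct task t = { .flags = 0, .autogroup = &ag };

	printf("running: use autogroup? %d\n", task_wants_autogroup(&t));
	t.flags |= PF_EXITING;	/* set on the do_exit() path */
	printf("exiting: use autogroup? %d\n", task_wants_autogroup(&t));
	return 0;
}
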