Merge branch 'linus' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -2454,6 +2454,7 @@ static bool states_equal(struct bpf_verifier_env *env,
 			 struct bpf_verifier_state *old,
 			 struct bpf_verifier_state *cur)
 {
+	bool varlen_map_access = env->varlen_map_value_access;
 	struct bpf_reg_state *rold, *rcur;
 	int i;
 
@@ -2467,12 +2468,17 @@ static bool states_equal(struct bpf_verifier_env *env,
 		/* If the ranges were not the same, but everything else was and
 		 * we didn't do a variable access into a map then we are a-ok.
 		 */
-		if (!env->varlen_map_value_access &&
+		if (!varlen_map_access &&
 		    rold->type == rcur->type && rold->imm == rcur->imm)
 			continue;
 
+		/* If we didn't map access then again we don't care about the
+		 * mismatched range values and it's ok if our old type was
+		 * UNKNOWN and we didn't go to a NOT_INIT'ed reg.
+		 */
 		if (rold->type == NOT_INIT ||
-		    (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT))
+		    (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
+		     rcur->type != NOT_INIT))
 			continue;
 
 		if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
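Note: the fix snapshots env->varlen_map_value_access into a local and lets it gate both equivalence tests. A minimal sketch of the resulting per-register predicate (the helper name is hypothetical; the real checks live inline in states_equal() as above):

    /* Sketch: may the old (pruned) register state stand in for the
     * current one? Mirrors the two "continue" conditions in the hunk.
     */
    static bool reg_state_compat(bool varlen_map_access,
    			     const struct bpf_reg_state *rold,
    			     const struct bpf_reg_state *rcur)
    {
    	/* Same type/imm is fine unless a variable map access happened. */
    	if (!varlen_map_access &&
    	    rold->type == rcur->type && rold->imm == rcur->imm)
    		return true;
    
    	/* NOT_INIT in the old state constrains nothing; an old
    	 * UNKNOWN_VALUE only subsumes rcur when no variable map
    	 * access happened.
    	 */
    	return rold->type == NOT_INIT ||
    	       (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
    		rcur->type != NOT_INIT);
    }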
@@ -902,7 +902,15 @@ list_update_cgroup_event(struct perf_event *event,
 	 * this will always be called from the right CPU.
 	 */
 	cpuctx = __get_cpu_context(ctx);
-	cpuctx->cgrp = add ? event->cgrp : NULL;
+
+	/*
+	 * cpuctx->cgrp is NULL until a cgroup event is sched in or
+	 * ctx->nr_cgroup == 0 .
+	 */
+	if (add && perf_cgroup_from_task(current, ctx) == event->cgrp)
+		cpuctx->cgrp = event->cgrp;
+	else if (!add)
+		cpuctx->cgrp = NULL;
 }
 
 #else /* !CONFIG_CGROUP_PERF */
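Note: the point of the change is that cpuctx->cgrp must track the cgroup of the task currently on the CPU, not merely whatever event was last added. A plain restatement of the new invariant (readability sketch only, names as in the hunk):

    if (add) {
    	/* Only publish event->cgrp if the task now running on this
    	 * CPU actually belongs to it; otherwise leave cpuctx->cgrp
    	 * alone and let cgroup sched-in set it later.
    	 */
    	if (perf_cgroup_from_task(current, ctx) == event->cgrp)
    		cpuctx->cgrp = event->cgrp;
    } else {
    	/* Last cgroup event is being removed: stop tracking. */
    	cpuctx->cgrp = NULL;
    }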
@@ -8018,6 +8026,7 @@ restart:
  * if <size> is not specified, the range is treated as a single address.
  */
 enum {
+	IF_ACT_NONE = -1,
 	IF_ACT_FILTER,
 	IF_ACT_START,
 	IF_ACT_STOP,
@@ -8041,6 +8050,7 @@ static const match_table_t if_tokens = {
 	{ IF_SRC_KERNEL,	"%u/%u" },
 	{ IF_SRC_FILEADDR,	"%u@%s" },
 	{ IF_SRC_KERNELADDR,	"%u" },
+	{ IF_ACT_NONE,		NULL },
 };
 
 /*
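Note: match_token() from <linux/parser.h> walks the table until a pattern matches, and a NULL pattern matches anything; without the { IF_ACT_NONE, NULL } sentinel an unrecognized filter fragment would walk off the end of if_tokens. A hedged usage sketch (function name hypothetical):

    #include <linux/parser.h>
    
    /* Classify one address-filter fragment against the table above. */
    static int filter_fragment_token(char *fragment, substring_t args[])
    {
    	/* Returns the matching IF_SRC_ or IF_ACT_ token, or
    	 * IF_ACT_NONE (the token paired with the NULL sentinel
    	 * pattern) when nothing matches.
    	 */
    	return match_token(fragment, if_tokens, args);
    }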
@@ -7,6 +7,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
@@ -506,13 +506,13 @@ static void __print_lock_name(struct lock_class *class)
 	name = class->name;
 	if (!name) {
 		name = __get_key_name(class->key, str);
-		printk("%s", name);
+		printk(KERN_CONT "%s", name);
 	} else {
-		printk("%s", name);
+		printk(KERN_CONT "%s", name);
 		if (class->name_version > 1)
-			printk("#%d", class->name_version);
+			printk(KERN_CONT "#%d", class->name_version);
 		if (class->subclass)
-			printk("/%d", class->subclass);
+			printk(KERN_CONT "/%d", class->subclass);
 	}
 }
@@ -522,9 +522,9 @@ static void print_lock_name(struct lock_class *class)
 
 	get_usage_chars(class, usage);
 
-	printk(" (");
+	printk(KERN_CONT " (");
 	__print_lock_name(class);
-	printk("){%s}", usage);
+	printk(KERN_CONT "){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
@@ -536,7 +536,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 	if (!name)
 		name = __get_key_name(lock->key->subkeys, str);
 
-	printk("%s", name);
+	printk(KERN_CONT "%s", name);
 }
 
 static void print_lock(struct held_lock *hlock)
@@ -551,13 +551,13 @@ static void print_lock(struct held_lock *hlock)
 	barrier();
 
 	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
-		printk("<RELEASED>\n");
+		printk(KERN_CONT "<RELEASED>\n");
 		return;
 	}
 
 	print_lock_name(lock_classes + class_idx - 1);
-	printk(", at: ");
-	print_ip_sym(hlock->acquire_ip);
+	printk(KERN_CONT ", at: [<%p>] %pS\n",
+		(void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
 }
 
 static void lockdep_print_held_locks(struct task_struct *curr)
@@ -792,8 +792,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
-			printk("#%d", class->name_version);
-		printk("\n");
+			printk(KERN_CONT "#%d", class->name_version);
+		printk(KERN_CONT "\n");
 		dump_stack();
 
 		if (!graph_lock()) {
@@ -1071,7 +1071,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 		return 0;
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
-	printk(":\n");
+	printk(KERN_CONT ":\n");
 	print_stack_trace(&target->trace, 6);
 
 	return 0;
@@ -1102,11 +1102,11 @@ print_circular_lock_scenario(struct held_lock *src,
 	if (parent != source) {
 		printk("Chain exists of:\n ");
 		__print_lock_name(source);
-		printk(" --> ");
+		printk(KERN_CONT " --> ");
 		__print_lock_name(parent);
-		printk(" --> ");
+		printk(KERN_CONT " --> ");
 		__print_lock_name(target);
-		printk("\n\n");
+		printk(KERN_CONT "\n\n");
 	}
 
 	printk(" Possible unsafe locking scenario:\n\n");
@@ -1114,16 +1114,16 @@ print_circular_lock_scenario(struct held_lock *src,
 	printk(" ---- ----\n");
 	printk(" lock(");
 	__print_lock_name(target);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(parent);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(target);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(source);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
@@ -1359,22 +1359,22 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 	printk("%*s->", depth, "");
 	print_lock_name(class);
-	printk(" ops: %lu", class->ops);
-	printk(" {\n");
+	printk(KERN_CONT " ops: %lu", class->ops);
+	printk(KERN_CONT " {\n");
 
 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
 		if (class->usage_mask & (1 << bit)) {
 			int len = depth;
 
 			len += printk("%*s %s", depth, "", usage_str[bit]);
-			len += printk(" at:\n");
+			len += printk(KERN_CONT " at:\n");
 			print_stack_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
 
-	printk("%*s ... key at: ",depth,"");
-	print_ip_sym((unsigned long)class->key);
+	printk("%*s ... key at: [<%p>] %pS\n",
+		depth, "", class->key, class->key);
 }
 
 /*
@@ -1437,11 +1437,11 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
 	if (middle_class != unsafe_class) {
 		printk("Chain exists of:\n ");
 		__print_lock_name(safe_class);
-		printk(" --> ");
+		printk(KERN_CONT " --> ");
 		__print_lock_name(middle_class);
-		printk(" --> ");
+		printk(KERN_CONT " --> ");
 		__print_lock_name(unsafe_class);
-		printk("\n\n");
+		printk(KERN_CONT "\n\n");
 	}
 
 	printk(" Possible interrupt unsafe locking scenario:\n\n");
@@ -1449,18 +1449,18 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
 	printk(" ---- ----\n");
 	printk(" lock(");
 	__print_lock_name(unsafe_class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" local_irq_disable();\n");
 	printk(" lock(");
 	__print_lock_name(safe_class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(middle_class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" <Interrupt>\n");
 	printk(" lock(");
 	__print_lock_name(safe_class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
@@ -1497,9 +1497,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock(prev);
 	printk("which would create a new lock dependency:\n");
 	print_lock_name(hlock_class(prev));
-	printk(" ->");
+	printk(KERN_CONT " ->");
 	print_lock_name(hlock_class(next));
-	printk("\n");
+	printk(KERN_CONT "\n");
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
@@ -1521,8 +1521,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
-	printk(" and the holding lock:\n");
+	printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
 	if (!save_trace(&prev_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(backwards_entry, prev_root);
@@ -1694,10 +1693,10 @@ print_deadlock_scenario(struct held_lock *nxt,
 	printk(" ----\n");
 	printk(" lock(");
 	__print_lock_name(prev);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(next);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 	printk(" May be due to missing lock nesting notation\n\n");
 }
@@ -1891,9 +1890,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(hlock_class(prev));
-		printk(" => ");
+		printk(KERN_CONT " => ");
 		print_lock_name(hlock_class(next));
-		printk("\n");
+		printk(KERN_CONT "\n");
 		dump_stack();
 		return graph_lock();
 	}
@@ -2343,11 +2342,11 @@ print_usage_bug_scenario(struct held_lock *lock)
 	printk(" ----\n");
 	printk(" lock(");
 	__print_lock_name(class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" <Interrupt>\n");
 	printk(" lock(");
 	__print_lock_name(class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
 
@@ -2522,14 +2521,18 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 void print_irqtrace_events(struct task_struct *curr)
 {
 	printk("irq event stamp: %u\n", curr->irq_events);
-	printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
-	print_ip_sym(curr->hardirq_enable_ip);
-	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
-	print_ip_sym(curr->hardirq_disable_ip);
-	printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
-	print_ip_sym(curr->softirq_enable_ip);
-	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
-	print_ip_sym(curr->softirq_disable_ip);
+	printk("hardirqs last enabled at (%u): [<%p>] %pS\n",
+		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
+		(void *)curr->hardirq_enable_ip);
+	printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
+		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
+		(void *)curr->hardirq_disable_ip);
+	printk("softirqs last enabled at (%u): [<%p>] %pS\n",
+		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
+		(void *)curr->softirq_enable_ip);
+	printk("softirqs last disabled at (%u): [<%p>] %pS\n",
+		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
+		(void *)curr->softirq_disable_ip);
 }
 
 static int HARDIRQ_verbose(struct lock_class *class)
@@ -3235,8 +3238,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (very_verbose(class)) {
 		printk("\nacquire class [%p] %s", class->key, class->name);
 		if (class->name_version > 1)
-			printk("#%d", class->name_version);
-		printk("\n");
+			printk(KERN_CONT "#%d", class->name_version);
+		printk(KERN_CONT "\n");
 		dump_stack();
 	}
 
@@ -3378,7 +3381,7 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk("%s/%d is trying to release lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(") at:\n");
+	printk(KERN_CONT ") at:\n");
 	print_ip_sym(ip);
 	printk("but there are no more locks to release!\n");
 	printk("\nother info that might help us debug this:\n");
@@ -3871,7 +3874,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk("%s/%d is trying to contend lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(") at:\n");
+	printk(KERN_CONT ") at:\n");
 	print_ip_sym(ip);
 	printk("but there are no locks held!\n");
 	printk("\nother info that might help us debug this:\n");
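Note: since printk() stopped treating unflagged messages as implicit continuations, every fragment that extends the current line needs an explicit KERN_CONT, or it is flushed as a new record on its own line; the whole lockdep hunk above is that conversion. A minimal illustration (not from the patch):

    #include <linux/printk.h>
    
    static void demo_one_line(void)
    {
    	printk(KERN_INFO "new class: foo");	/* opens the record */
    	printk(KERN_CONT "#%d", 2);		/* appends to the same line */
    	printk(KERN_CONT "\n");			/* terminates it */
    }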
@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
 
 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 {
-	if (!rt_mutex_has_waiters(lock))
-		clear_rt_mutex_waiters(lock);
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	if (rt_mutex_has_waiters(lock))
+		return;
+
+	/*
+	 * The rbtree has no waiters enqueued, now make sure that the
+	 * lock->owner still has the waiters bit set, otherwise the
+	 * following can happen:
+	 *
+	 * CPU 0	CPU 1		CPU2
+	 * l->owner=T1
+	 *		rt_mutex_lock(l)
+	 *		lock(l->lock)
+	 *		l->owner = T1 | HAS_WAITERS;
+	 *		enqueue(T2)
+	 *		boost()
+	 *		  unlock(l->lock)
+	 *		block()
+	 *
+	 *				rt_mutex_lock(l)
+	 *				lock(l->lock)
+	 *				l->owner = T1 | HAS_WAITERS;
+	 *				enqueue(T3)
+	 *				boost()
+	 *				  unlock(l->lock)
+	 *				block()
+	 *		signal(->T2)	signal(->T3)
+	 *		lock(l->lock)
+	 *		dequeue(T2)
+	 *		deboost()
+	 *		  unlock(l->lock)
+	 *				lock(l->lock)
+	 *				dequeue(T3)
+	 *				 ==> wait list is empty
+	 *				deboost()
+	 *				 unlock(l->lock)
+	 *		lock(l->lock)
+	 *		fixup_rt_mutex_waiters()
+	 *		  if (wait_list_empty(l) {
+	 *		    owner = l->owner & ~HAS_WAITERS;
+	 *		    l->owner = owner
+	 *		     ==> l->owner = T1
+	 *		  }
+	 *				lock(l->lock)
+	 * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
+	 *				  if (wait_list_empty(l) {
+	 *				    owner = l->owner & ~HAS_WAITERS;
+	 * cmpxchg(l->owner, T1, NULL)
+	 *  ===> Success (l->owner = NULL)
+	 *
+	 *				    l->owner = owner
+	 *				     ==> l->owner = T1
+	 *				  }
+	 *
+	 * With the check for the waiter bit in place T3 on CPU2 will not
+	 * overwrite. All tasks fiddling with the waiters bit are
+	 * serialized by l->lock, so nothing else can modify the waiters
+	 * bit. If the bit is set then nothing can change l->owner either
+	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
+	 * happens in the middle of the RMW because the waiters bit is
+	 * still set.
+	 */
+	owner = READ_ONCE(*p);
+	if (owner & RT_MUTEX_HAS_WAITERS)
+		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
 /*
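Note: the fix works because every writer of the waiters bit holds lock->wait_lock, while the unlock fast path is lockless but conditional on the bit being clear. A user-space analogue of that pattern, with hypothetical names and C11 atomics (a sketch, not the kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    
    #define HAS_WAITERS	1UL
    
    static _Atomic uintptr_t owner;	/* task pointer | flag bit */
    
    /* Caller holds the wait-list lock: the only context that may touch
     * the flag, so this plain load/store RMW cannot race with another
     * flag update.
     */
    static void fixup_waiters_bit(void)
    {
    	uintptr_t o = atomic_load(&owner);
    
    	if (o & HAS_WAITERS)
    		atomic_store(&owner, o & ~HAS_WAITERS);
    }
    
    /* Lockless unlock fast path: succeeds only while HAS_WAITERS is
     * clear, mirroring cmpxchg(l->owner, T1, NULL) in the comment
     * above. If it runs in the middle of the RMW, the still-set bit
     * makes it fail.
     */
    static bool try_fast_unlock(uintptr_t me)
    {
    	uintptr_t expected = me;
    
    	return atomic_compare_exchange_strong(&owner, &expected, 0);
    }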
@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
 
 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 {
-	return (struct task_struct *)
-		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+	return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
 }
 
 /*
@@ -1301,8 +1301,9 @@ static int check_version(Elf_Shdr *sechdrs,
 		goto bad_version;
 	}
 
-	pr_warn("%s: no symbol version for %s\n", mod->name, symname);
-	return 0;
+	/* Broken toolchain. Warn once, then let it go.. */
+	pr_warn_once("%s: no symbol version for %s\n", mod->name, symname);
+	return 1;
 
 bad_version:
 	pr_warn("%s: disagrees about version of symbol %s\n",
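Note: returning 1 makes a missing symbol version non-fatal (the module still loads), and pr_warn_once() keeps a broken toolchain from flooding the log. Roughly what the once-idiom boils down to (simplified sketch; the real macro lives in include/linux/printk.h):

    #define my_warn_once(fmt, ...)			\
    do {						\
    	static bool __warned;			\
    						\
    	if (!__warned) {			\
    		__warned = true;		\
    		pr_warn(fmt, ##__VA_ARGS__);	\
    	}					\
    } while (0)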
@@ -212,6 +212,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
 {
 	static unsigned long next = INITIAL_JIFFIES;
 	struct autogroup *ag;
+	unsigned long shares;
 	int err;
 
 	if (nice < MIN_NICE || nice > MAX_NICE)
@@ -230,9 +231,10 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
 
 	next = HZ / 10 + jiffies;
 	ag = autogroup_task_get(p);
+	shares = scale_load(sched_prio_to_weight[nice + 20]);
 
 	down_write(&ag->lock);
-	err = sched_group_set_shares(ag->tg, sched_prio_to_weight[nice + 20]);
+	err = sched_group_set_shares(ag->tg, shares);
 	if (!err)
 		ag->nice = nice;
 	up_write(&ag->lock);
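Note: on 64-bit kernels load weights carry extra fixed-point resolution, so the raw sched_prio_to_weight[] entry has to go through scale_load() before sched_group_set_shares(); passing the bare table value under-weighted every autogroup nice level by 2^10. A sketch of the scaling (macro values as in kernel/sched/sched.h at the time; treat the exact constants as an assumption of this note):

    #ifdef CONFIG_64BIT
    # define SCHED_FIXEDPOINT_SHIFT	10
    # define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
    #else
    # define scale_load(w)		(w)
    #endif
    
    /* nice 0 maps to sched_prio_to_weight[20] == 1024, so the group's
     * shares become 1024 << 10 on 64-bit rather than a bare 1024.
     */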