Merge branch 'locking/urgent' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -76,6 +76,23 @@ module_param(lock_stat, int, 0644);
 #define lock_stat 0
 #endif

+DEFINE_PER_CPU(unsigned int, lockdep_recursion);
+EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
+
+static inline bool lockdep_enabled(void)
+{
+	if (!debug_locks)
+		return false;
+
+	if (raw_cpu_read(lockdep_recursion))
+		return false;
+
+	if (current->lockdep_recursion)
+		return false;
+
+	return true;
+}
+
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  * class/list/hash allocators.
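The hunks in this diff appear to target kernel/locking/lockdep.c. The new lockdep_enabled() helper above bundles the three conditions under which lockdep may do any work: the global debug_locks flag, the new per-CPU lockdep_recursion counter, and the existing per-task counter. Below is a minimal user-space model of that gate, not kernel code; the _Thread_local variable merely stands in for the per-CPU counter and every *_model name is invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-ins for the state lockdep_enabled() consults. */
    static int debug_locks_model = 1;                      /* global "lockdep still trusted" flag */
    static _Thread_local unsigned int cpu_recursion_model; /* models the per-CPU counter */
    static unsigned int task_recursion_model;              /* models current->lockdep_recursion */

    static bool lockdep_enabled_model(void)
    {
        if (!debug_locks_model)
            return false;
        if (cpu_recursion_model)   /* raw_cpu_read(lockdep_recursion) in the hunk above */
            return false;
        if (task_recursion_model)  /* current->lockdep_recursion */
            return false;
        return true;
    }

    int main(void)
    {
        printf("enabled: %d\n", lockdep_enabled_model()); /* 1 */
        cpu_recursion_model++;   /* inside an instrumented region: re-entry is refused */
        printf("enabled: %d\n", lockdep_enabled_model()); /* 0 */
        return 0;
    }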
@@ -93,7 +110,7 @@ static inline void lockdep_lock(void)

 	arch_spin_lock(&__lock);
 	__owner = current;
-	current->lockdep_recursion++;
+	__this_cpu_inc(lockdep_recursion);
 }

 static inline void lockdep_unlock(void)
@@ -101,7 +118,7 @@ static inline void lockdep_unlock(void)
 	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
 		return;

-	current->lockdep_recursion--;
+	__this_cpu_dec(lockdep_recursion);
 	__owner = NULL;
 	arch_spin_unlock(&__lock);
 }
@@ -408,10 +425,15 @@ void lockdep_init_task(struct task_struct *task)
 	task->lockdep_recursion = 0;
 }

+static __always_inline void lockdep_recursion_inc(void)
+{
+	__this_cpu_inc(lockdep_recursion);
+}
+
 static __always_inline void lockdep_recursion_finish(void)
 {
-	if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
-		current->lockdep_recursion = 0;
+	if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
+		__this_cpu_write(lockdep_recursion, 0);
 }

 void lockdep_set_selftest_task(struct task_struct *task)
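lockdep_recursion_inc() and the reworked lockdep_recursion_finish() above form the bracket every outer lockdep entry point uses: bump the per-CPU counter, do the real work with IRQs disabled, then drop the counter and complain if it does not come back to zero. A small stand-alone sketch of that bracket follows; the per-CPU counter is again modelled by a thread-local and every *_model name is invented.

    #include <stdio.h>

    static _Thread_local unsigned int recursion_model; /* stands in for the per-CPU counter */

    static void recursion_inc_model(void)
    {
        recursion_model++;
    }

    static void recursion_finish_model(void)
    {
        /* Same shape as the patched lockdep_recursion_finish(): after the
         * decrement the counter must be zero again; if not, warn and reset. */
        if (--recursion_model != 0) {
            fprintf(stderr, "unbalanced lockdep recursion count, resetting\n");
            recursion_model = 0;
        }
    }

    static void entry_point_model(void)
    {
        /* raw_local_irq_save(flags) would bracket this in the kernel */
        recursion_inc_model();
        /* ... the real __lock_acquire()/__lock_release() style work ... */
        recursion_finish_model();
        /* raw_local_irq_restore(flags) */
    }

    int main(void)
    {
        entry_point_model();
        printf("counter after a balanced call: %u\n", recursion_model); /* 0 */
        return 0;
    }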
@@ -600,6 +622,8 @@ static const char *usage_str[] =
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
 	[LOCK_USED] = "INITIAL USE",
+	[LOCK_USED_READ] = "INITIAL READ USE",
+	/* abused as string storage for verify_lock_unused() */
 	[LOCK_USAGE_STATES] = "IN-NMI",
 };
 #endif
@@ -2252,7 +2276,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 #endif
 	printk(KERN_CONT " {\n");

-	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+	for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
 		if (class->usage_mask & (1 << bit)) {
 			int len = depth;

@@ -4033,7 +4057,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
 	if (unlikely(in_nmi()))
 		return;

-	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
+	if (unlikely(__this_cpu_read(lockdep_recursion)))
 		return;

 	if (unlikely(lockdep_hardirqs_enabled())) {
@@ -4069,7 +4093,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)

 	current->hardirq_chain_key = current->curr_chain_key;

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__trace_hardirqs_on_caller();
 	lockdep_recursion_finish();
 }
@@ -4102,7 +4126,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 		goto skip_checks;
 	}

-	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
+	if (unlikely(__this_cpu_read(lockdep_recursion)))
 		return;

 	if (lockdep_hardirqs_enabled()) {
@@ -4155,7 +4179,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 	if (in_nmi()) {
 		if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
 			return;
-	} else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
+	} else if (__this_cpu_read(lockdep_recursion))
 		return;

 	/*
@@ -4188,7 +4212,7 @@ void lockdep_softirqs_on(unsigned long ip)
 {
 	struct irqtrace_events *trace = &current->irqtrace;

-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	/*
@@ -4203,7 +4227,7 @@ void lockdep_softirqs_on(unsigned long ip)
 		return;
 	}

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	/*
 	 * We'll do an OFF -> ON transition:
 	 */
@@ -4226,7 +4250,7 @@ void lockdep_softirqs_on(unsigned long ip)
  */
 void lockdep_softirqs_off(unsigned long ip)
 {
-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	/*
@@ -4345,13 +4369,18 @@ static int separate_irq_context(struct task_struct *curr,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
 			     enum lock_usage_bit new_bit)
 {
-	unsigned int new_mask = 1 << new_bit, ret = 1;
+	unsigned int new_mask, ret = 1;

 	if (new_bit >= LOCK_USAGE_STATES) {
 		DEBUG_LOCKS_WARN_ON(1);
 		return 0;
 	}

+	if (new_bit == LOCK_USED && this->read)
+		new_bit = LOCK_USED_READ;
+
+	new_mask = 1 << new_bit;
+
 	/*
 	 * If already set then do not dirty the cacheline,
 	 * nor do any checks:
@@ -4364,26 +4393,32 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	/*
 	 * Make sure we didn't race:
 	 */
-	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
-		graph_unlock();
-		return 1;
-	}
+	if (unlikely(hlock_class(this)->usage_mask & new_mask))
+		goto unlock;

 	hlock_class(this)->usage_mask |= new_mask;

-	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
-		return 0;
-
-	switch (new_bit) {
-	case LOCK_USED:
-		debug_atomic_dec(nr_unused_locks);
-		break;
-	default:
-		ret = mark_lock_irq(curr, this, new_bit);
-		if (!ret)
+	if (new_bit < LOCK_TRACE_STATES) {
+		if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
+			return 0;
+	}
+
+	switch (new_bit) {
+	case 0 ... LOCK_USED-1:
+		ret = mark_lock_irq(curr, this, new_bit);
+		if (!ret)
 			return 0;
 		break;
+
+	case LOCK_USED:
+		debug_atomic_dec(nr_unused_locks);
+		break;
+
+	default:
+		break;
 	}

+unlock:
 	graph_unlock();

 	/*
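Taken together, the two mark_lock() hunks change two things: a LOCK_USED marking of a read-held lock is downgraded to LOCK_USED_READ before the mask is computed, and the switch now dispatches the irq-state bits through a GNU case range while LOCK_USED_READ falls through to the default case with no extra work. A rough stand-alone model of that dispatch follows; the enum values and the trace cut-off are illustrative placeholders, not the real lockdep bit layout.

    #include <stdio.h>

    /* Invented usage-bit layout; only the dispatch shape mirrors the patch. */
    enum usage_bit_model {
        USED_IN_HARDIRQ_MODEL,   /* stands in for the irq-state bits */
        LOCK_USED_MODEL,
        LOCK_USED_READ_MODEL,
        LOCK_USAGE_STATES_MODEL,
    };
    #define LOCK_TRACE_STATES_MODEL LOCK_USAGE_STATES_MODEL /* illustrative cut-off */

    static void mark_lock_model(enum usage_bit_model bit, int is_read)
    {
        /* New: marking a read-held lock as LOCK_USED is downgraded first. */
        if (bit == LOCK_USED_MODEL && is_read)
            bit = LOCK_USED_READ_MODEL;

        /* Only bits below the cut-off get a stack trace recorded. */
        if (bit < LOCK_TRACE_STATES_MODEL)
            printf("save_trace() for bit %d\n", bit);

        switch (bit) {
        case 0 ... LOCK_USED_MODEL - 1: /* GNU case range, as in the patch */
            printf("mark_lock_irq() for bit %d\n", bit);
            break;
        case LOCK_USED_MODEL:
            printf("class leaves the nr_unused_locks accounting\n");
            break;
        default:                        /* LOCK_USED_READ needs neither */
            break;
        }
    }

    int main(void)
    {
        mark_lock_model(USED_IN_HARDIRQ_MODEL, 0);
        mark_lock_model(LOCK_USED_MODEL, 0);
        mark_lock_model(LOCK_USED_MODEL, 1); /* becomes LOCK_USED_READ */
        return 0;
    }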
@@ -4596,11 +4631,11 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
 	if (subclass) {
 		unsigned long flags;

-		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+		if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
 			return;

 		raw_local_irq_save(flags);
-		current->lockdep_recursion++;
+		lockdep_recursion_inc();
 		register_lock_class(lock, subclass, 1);
 		lockdep_recursion_finish();
 		raw_local_irq_restore(flags);
@@ -5283,11 +5318,11 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 {
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	check_flags(flags);
 	if (__lock_set_class(lock, name, key, subclass, ip))
 		check_chain_key(current);
@@ -5300,11 +5335,11 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	check_flags(flags);
 	if (__lock_downgrade(lock, ip))
 		check_chain_key(current);
@@ -5318,12 +5353,20 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
 {
 #ifdef CONFIG_PROVE_LOCKING
 	struct lock_class *class = look_up_lock_class(lock, subclass);
+	unsigned long mask = LOCKF_USED;

 	/* if it doesn't have a class (yet), it certainly hasn't been used yet */
 	if (!class)
 		return;

-	if (!(class->usage_mask & LOCK_USED))
+	/*
+	 * READ locks only conflict with USED, such that if we only ever use
+	 * READ locks, there is no deadlock possible -- RCU.
+	 */
+	if (!hlock->read)
+		mask |= LOCKF_USED_READ;
+
+	if (!(class->usage_mask & mask))
 		return;

 	hlock->class_idx = class - lock_classes;
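The comment block above carries the actual argument: a class that has only ever been read-held cannot deadlock against a read acquisition from NMI context (the RCU pattern), so the check builds a conflict mask instead of testing LOCKF_USED alone. A stand-alone restatement of that decision follows, with invented bit values standing in for the real LOCKF_* masks.

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented bit values; the real LOCKF_* masks come from lockdep internals. */
    #define LOCKF_USED_MODEL      (1u << 0)
    #define LOCKF_USED_READ_MODEL (1u << 1)

    /* Should an NMI-context acquisition of this class be reported? */
    static bool nmi_use_conflicts_model(unsigned int class_usage_mask, bool read)
    {
        unsigned int mask = LOCKF_USED_MODEL;

        /*
         * A read acquisition only conflicts with prior write use; a write
         * acquisition also conflicts with prior read-only use.
         */
        if (!read)
            mask |= LOCKF_USED_READ_MODEL;

        return class_usage_mask & mask;
    }

    int main(void)
    {
        /* class only ever read-held + NMI read: no possible deadlock (RCU) */
        printf("%d\n", nmi_use_conflicts_model(LOCKF_USED_READ_MODEL, true));  /* 0 */
        /* class only ever read-held + NMI write: report it */
        printf("%d\n", nmi_use_conflicts_model(LOCKF_USED_READ_MODEL, false)); /* 1 */
        return 0;
    }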
@@ -5334,7 +5377,7 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock

 static bool lockdep_nmi(void)
 {
-	if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
+	if (raw_cpu_read(lockdep_recursion))
 		return false;

 	if (!in_nmi())
@@ -5369,7 +5412,10 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,

 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);

-	if (unlikely(current->lockdep_recursion)) {
+	if (!debug_locks)
+		return;
+
+	if (unlikely(!lockdep_enabled())) {
 		/* XXX allow trylock from NMI ?!? */
 		if (lockdep_nmi() && !trylock) {
 			struct held_lock hlock;
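lock_acquire() now bails out as soon as debug_locks is clear, and only when normal tracking is unavailable does it fall back to the limited NMI path: a non-trylock acquisition from NMI still gets a one-shot check via lockdep_nmi(). A rough model of that ordering follows; all *_model names are invented and this is not the kernel control flow verbatim.

    #include <stdbool.h>
    #include <stdio.h>

    static bool debug_locks_model = true;   /* lockdep has not given up yet */
    static bool tracking_model    = false;  /* models lockdep_enabled() */
    static bool in_nmi_model      = true;   /* models lockdep_nmi()'s in_nmi() test */

    static void lock_acquire_model(bool trylock)
    {
        if (!debug_locks_model)
            return;                 /* lockdep gave up entirely: do nothing */

        if (!tracking_model) {
            /* XXX allow trylock from NMI ?!? -- comment kept from the patch */
            if (in_nmi_model && !trylock)
                printf("limited NMI-time usage check\n");
            return;                 /* no full graph update on this path */
        }

        printf("full __lock_acquire() tracking\n");
    }

    int main(void)
    {
        lock_acquire_model(false);  /* NMI, not a trylock: limited check */
        tracking_model = true;
        in_nmi_model = false;
        lock_acquire_model(false);  /* normal context: full tracking */
        return 0;
    }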
@@ -5392,7 +5438,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_acquire(lock, subclass, trylock, read, check,
 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
 	lockdep_recursion_finish();
@@ -5406,13 +5452,13 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)

 	trace_lock_release(lock, ip);

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	if (__lock_release(lock, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
@@ -5425,13 +5471,13 @@ noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 	unsigned long flags;
 	int ret = 0;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return 1; /* avoid false negative lockdep_assert_held() */

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	ret = __lock_is_held(lock, read);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
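The `return 1` above is deliberate: when lockdep cannot check (disabled or recursing), lock_is_held_type() claims the lock is held so that lockdep_assert_held() callers do not emit a false warning. A tiny model of why the fallback value matters follows; the assert macro is invented, not the kernel's.

    #include <stdbool.h>
    #include <stdio.h>

    static bool lockdep_active_model;   /* models lockdep_enabled() */
    static bool lock_really_held_model; /* what full tracking would report */

    static int lock_is_held_model(void)
    {
        if (!lockdep_active_model)
            return 1; /* avoid false negative lockdep_assert_held() */
        return lock_really_held_model;
    }

    /* Invented assert; the kernel's lockdep_assert_held() warns via WARN_ON(). */
    #define assert_held_model() \
        do { if (!lock_is_held_model()) printf("WARN: lock not held!\n"); } while (0)

    int main(void)
    {
        lockdep_active_model = false;   /* checking is off: must stay silent */
        assert_held_model();
        lockdep_active_model = true;
        lock_really_held_model = false; /* checking is on, lock not held: warn */
        assert_held_model();
        return 0;
    }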
@@ -5446,13 +5492,13 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 	struct pin_cookie cookie = NIL_COOKIE;
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return cookie;

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	cookie = __lock_pin_lock(lock);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5465,13 +5511,13 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_repin_lock(lock, cookie);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5482,13 +5528,13 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_unpin_lock(lock, cookie);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5618,15 +5664,12 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)

 	trace_lock_acquired(lock, ip);

-	if (unlikely(!lock_stat || !debug_locks))
-		return;
-
-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lock_stat || !lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);
-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_contended(lock, ip);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5639,15 +5682,12 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)

 	trace_lock_contended(lock, ip);

-	if (unlikely(!lock_stat || !debug_locks))
-		return;
-
-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lock_stat || !lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);
-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_acquired(lock, ip);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);