Merge tag 'locking-urgent-2020-11-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Thomas Gleixner:
 "A couple of locking fixes:

   - Fix incorrect failure injection handling in the futex code

   - Prevent a preemption warning in lockdep when tracking
     local_irq_enable() and interrupts are already enabled

   - Remove more raw_cpu_read() usage from lockdep which causes state
     corruption on !X86 architectures

   - Make the nr_unused_locks accounting in lockdep correct again"

* tag 'locking-urgent-2020-11-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep: Fix nr_unused_locks accounting
  locking/lockdep: Remove more raw_cpu_read() usage
  futex: Fix incorrect should_fail_futex() handling
  lockdep: Fix preemption WARN for spurious IRQ-enable
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1503,8 +1503,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
 	 */
 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
-	if (unlikely(should_fail_futex(true)))
+	if (unlikely(should_fail_futex(true))) {
 		ret = -EFAULT;
+		goto out_unlock;
+	}
 
 	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
 	if (!ret && (curval != uval)) {
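The hunk above closes a fall-through bug: the injected failure set ret to -EFAULT but execution continued into cmpxchg_futex_value_locked(), which both modified the futex word and overwrote ret, so the injection was lost. A minimal compilable sketch of that hazard, using illustrative stand-ins (fail_injected, do_cmpxchg) rather than the kernel code:

/* Sketch only: stand-ins for should_fail_futex() and the cmpxchg. */
#include <stdio.h>

#define EFAULT 14

static int fail_injected = 1;              /* pretend fault injection fired */

static int do_cmpxchg(void) { return 0; }  /* the real operation succeeds */

/* Before the fix: ret is set, but the next call clobbers it, so the
 * injected failure is silently lost and state is modified anyway. */
static int wake_pi_buggy(void)
{
	int ret = 0;

	if (fail_injected)
		ret = -EFAULT;

	ret = do_cmpxchg();                /* overwrites -EFAULT with 0 */
	return ret;
}

/* After the fix: bail out before touching the futex value at all. */
static int wake_pi_fixed(void)
{
	int ret = 0;

	if (fail_injected) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = do_cmpxchg();
out_unlock:
	return ret;
}

int main(void)
{
	printf("buggy: %d, fixed: %d\n", wake_pi_buggy(), wake_pi_fixed());
	return 0;                          /* prints "buggy: 0, fixed: -14" */
}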
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -84,7 +84,7 @@ static inline bool lockdep_enabled(void)
 	if (!debug_locks)
 		return false;
 
-	if (raw_cpu_read(lockdep_recursion))
+	if (this_cpu_read(lockdep_recursion))
 		return false;
 
 	if (current->lockdep_recursion)
@@ -4057,7 +4057,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
 	if (unlikely(in_nmi()))
 		return;
 
-	if (unlikely(__this_cpu_read(lockdep_recursion)))
+	if (unlikely(this_cpu_read(lockdep_recursion)))
 		return;
 
 	if (unlikely(lockdep_hardirqs_enabled())) {
@@ -4126,7 +4126,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 		goto skip_checks;
 	}
 
-	if (unlikely(__this_cpu_read(lockdep_recursion)))
+	if (unlikely(this_cpu_read(lockdep_recursion)))
 		return;
 
 	if (lockdep_hardirqs_enabled()) {
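The three hunks above swap the raw_cpu_read()/__this_cpu_read() accessors for this_cpu_read(). On x86 a per-cpu read is a single %gs-relative instruction, so all variants behave alike; on architectures using the generic fallback, only this_cpu_read() brackets the access with preempt_disable(), while the raw/__ variants assume the caller already cannot migrate. A compilable sketch of that structural difference, with simplified stand-in helpers rather than the real implementations in include/asm-generic/percpu.h:

/* Sketch only: illustrative stand-ins, not the kernel per-cpu machinery. */
#include <stdio.h>

#define NR_CPUS 4
static int lockdep_recursion[NR_CPUS];   /* one counter per CPU */
static int cur_cpu;                      /* which CPU we are "running" on */
static int preempt_count;                /* nonzero: migration forbidden */

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }
static int  smp_processor_id(void) { return cur_cpu; }

/* raw variant: nothing stops the scheduler from migrating the task
 * between computing the CPU id and performing the access, so this can
 * read (or, for writes, corrupt) another CPU's counter. */
static int raw_cpu_read_sketch(void)
{
	return lockdep_recursion[smp_processor_id()];
}

/* this_cpu variant: the access is pinned, so the CPU id cannot change
 * between lookup and load. */
static int this_cpu_read_sketch(void)
{
	int val;

	preempt_disable();
	val = lockdep_recursion[smp_processor_id()];
	preempt_enable();
	return val;
}

int main(void)
{
	lockdep_recursion[0] = 1;
	cur_cpu = 0;
	printf("raw=%d safe=%d\n", raw_cpu_read_sketch(), this_cpu_read_sketch());
	return 0;
}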
@@ -4396,6 +4396,9 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (unlikely(hlock_class(this)->usage_mask & new_mask))
 		goto unlock;
 
+	if (!hlock_class(this)->usage_mask)
+		debug_atomic_dec(nr_unused_locks);
+
 	hlock_class(this)->usage_mask |= new_mask;
 
 	if (new_bit < LOCK_TRACE_STATES) {
@@ -4403,19 +4406,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		return 0;
 	}
 
-	switch (new_bit) {
-	case 0 ... LOCK_USED-1:
+	if (new_bit < LOCK_USED) {
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
-		break;
-
-	case LOCK_USED:
-		debug_atomic_dec(nr_unused_locks);
-		break;
-
-	default:
-		break;
 	}
 
 unlock:
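The two mark_lock() hunks above move the nr_unused_locks decrement off the LOCK_USED switch case and onto the first usage bit ever set in the class, i.e. the 0 -> nonzero transition of usage_mask, so the counter is decremented exactly once per class no matter which usage bit arrives first. A minimal compilable sketch of that transition-based accounting, with illustrative names rather than the kernel code:

/* Sketch only: account "unused" on the first-ever usage bit. */
#include <stdio.h>

static int nr_unused_locks = 1;    /* one freshly registered lock class */
static unsigned int usage_mask;    /* no usage bits set yet */

static void mark_usage(unsigned int new_mask)
{
	if (usage_mask & new_mask)      /* bit already set: nothing to do */
		return;

	if (!usage_mask)                /* first bit ever: class now "used" */
		nr_unused_locks--;

	usage_mask |= new_mask;
}

int main(void)
{
	mark_usage(1 << 3);             /* e.g. a read-usage bit arrives first */
	mark_usage(1 << 0);             /* later LOCK_USED-like bit: no double decrement */
	printf("nr_unused_locks=%d\n", nr_unused_locks);  /* prints 0 */
	return 0;
}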