Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The tree got pretty big in this development cycle, but the net effect
  is pretty good:

    115 files changed, 673 insertions(+), 1522 deletions(-)

  The main changes were:

   - Rework and generalize the mutex code to remove per arch mutex
     primitives. (Peter Zijlstra)

   - Add vCPU preemption support: add an interface to query the
     preemption status of vCPUs and use it in locking primitives - this
     optimizes paravirt performance. (Pan Xinhui, Juergen Gross,
     Christian Borntraeger)

   - Introduce cpu_relax_yield() and remove cpu_relax_lowlatency() to
     clean up and improve the s390 lock yielding machinery and its core
     kernel impact. (Christian Borntraeger)

   - Micro-optimize mutexes some more. (Waiman Long)

   - Reluctantly add the to-be-deprecated mutex_trylock_recursive()
     interface on a temporary basis, to give the DRM code more time to
     get rid of its locking hacks. Any other users will be NAK-ed on
     sight. (We turned off the deprecation warning for the time being
     to not pollute the build log.) (Peter Zijlstra)

   - Improve the rtmutex code a bit, in light of recent long lived
     bugs/races. (Thomas Gleixner)

   - Misc fixes, cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  x86/paravirt: Fix bool return type for PVOP_CALL()
  x86/paravirt: Fix native_patch()
  locking/ww_mutex: Use relaxed atomics
  locking/rtmutex: Explain locking rules for rt_mutex_proxy_unlock()/init_proxy_locked()
  locking/rtmutex: Get rid of RT_MUTEX_OWNER_MASKALL
  x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()
  locking/mutex: Break out of expensive busy-loop on {mutex,rwsem}_spin_on_owner() when owner vCPU is preempted
  locking/osq: Break out of spin-wait busy waiting loop for a preempted vCPU in osq_lock()
  Documentation/virtual/kvm: Support the vCPU preemption check
  x86/xen: Support the vCPU preemption check
  x86/kvm: Support the vCPU preemption check
  x86/kvm: Support the vCPU preemption check
  kvm: Introduce kvm_write_guest_offset_cached()
  locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests
  locking/spinlocks, s390: Implement vcpu_is_preempted(cpu)
  locking/core, powerpc: Implement vcpu_is_preempted(cpu)
  sched/core: Introduce the vcpu_is_preempted(cpu) interface
  sched/wake_q: Rename WAKE_Q to DEFINE_WAKE_Q
  locking/core: Provide common cpu_relax_yield() definition
  locking/mutex: Don't mark mutex_trylock_recursive() as deprecated, temporarily
  ...
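The headline feature above, the vcpu_is_preempted() check, lets spin-wait loops bail out when the lock holder's vCPU has been scheduled out by the hypervisor, so a guest does not burn cycles spinning on an owner that cannot make progress. Below is a minimal user-space sketch of that bail-out pattern, not the kernel implementation: the struct, the on_cpu/vcpu_preempted fields and spin_on_owner() name are illustrative stand-ins for the kernel's owner->on_cpu test plus vcpu_is_preempted(task_cpu(owner)), and sched_yield() stands in for cpu_relax().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <sched.h>

/* Illustrative stand-in for the owner state the kernel consults. */
struct spin_owner {
	atomic_bool on_cpu;		/* is the owner task currently running?      */
	atomic_bool vcpu_preempted;	/* has the hypervisor preempted its vCPU?    */
};

/*
 * Spin only while spinning can pay off: the owner must still hold the lock,
 * still be running, and its vCPU must not be preempted.  Otherwise report
 * failure so the caller falls back to the blocking slow path - the same
 * decision the patched mutex/rwsem spin loops make via vcpu_is_preempted().
 */
static bool spin_on_owner(atomic_uintptr_t *lock_owner, uintptr_t owner_val,
			  struct spin_owner *owner)
{
	while (atomic_load(lock_owner) == owner_val) {
		if (!atomic_load(&owner->on_cpu) ||
		    atomic_load(&owner->vcpu_preempted))
			return false;	/* stop wasting cycles, block instead */
		sched_yield();		/* user-space stand-in for cpu_relax() */
	}
	return true;			/* owner released the lock, retry the trylock */
}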
@@ -225,7 +225,7 @@ config ARCH_SUPPORTS_ATOMIC_RMW

config MUTEX_SPIN_ON_OWNER
def_bool y
depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW

config RWSEM_SPIN_ON_OWNER
def_bool y
@@ -1298,7 +1298,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);
bool deboost;
int ret = 0;

@@ -1415,7 +1415,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
int ret;
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

if (!bitset)
return -EINVAL;

@@ -1469,7 +1469,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
int ret, op_ret;
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);

@@ -1708,7 +1708,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

if (requeue_pi) {
/*
@@ -840,9 +840,9 @@ static struct lock_list *alloc_list_entry(void)
/*
* Add a new dependency to the head of the list:
*/
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
struct list_head *head, unsigned long ip,
int distance, struct stack_trace *trace)
static int add_lock_to_list(struct lock_class *this, struct list_head *head,
unsigned long ip, int distance,
struct stack_trace *trace)
{
struct lock_list *entry;
/*

@@ -1868,14 +1868,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
ret = add_lock_to_list(hlock_class(next),
&hlock_class(prev)->locks_after,
next->acquire_ip, distance, &trace);

if (!ret)
return 0;

ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
ret = add_lock_to_list(hlock_class(prev),
&hlock_class(next)->locks_before,
next->acquire_ip, distance, &trace);
if (!ret)
@@ -28,7 +28,7 @@ struct mcs_spinlock {
#define arch_mcs_spin_lock_contended(l) \
do { \
while (!(smp_load_acquire(l))) \
cpu_relax_lowlatency(); \
cpu_relax(); \
} while (0)
#endif

@@ -108,7 +108,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
return;
/* Wait until the next pointer is set */
while (!(next = READ_ONCE(node->next)))
cpu_relax_lowlatency();
cpu_relax();
}

/* Pass lock to next waiter. */
@@ -73,21 +73,8 @@ void debug_mutex_unlock(struct mutex *lock)
{
if (likely(debug_locks)) {
DEBUG_LOCKS_WARN_ON(lock->magic != lock);

if (!lock->owner)
DEBUG_LOCKS_WARN_ON(!lock->owner);
else
DEBUG_LOCKS_WARN_ON(lock->owner != current);

DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
}

/*
* __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
* mutexes so that we can do it here after we've verified state.
*/
mutex_clear_owner(lock);
atomic_set(&lock->count, 1);
}

void debug_mutex_init(struct mutex *lock, const char *name,
@@ -27,16 +27,6 @@ extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);

static inline void mutex_set_owner(struct mutex *lock)
{
WRITE_ONCE(lock->owner, current);
}

static inline void mutex_clear_owner(struct mutex *lock)
{
WRITE_ONCE(lock->owner, NULL);
}

#define spin_lock_mutex(lock, flags) \
do { \
struct mutex *l = container_of(lock, struct mutex, wait_lock); \
@@ -27,41 +27,176 @@
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

/*
* In the DEBUG case we are using the "NULL fastpath" for mutexes,
* which forces all calls into the slowpath:
*/
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
* Must be 0 for the debug case so we do not do the unlock outside of the
* wait_lock region. debug_mutex_unlock() will do the actual unlock in this
* case.
*/
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock() 0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
atomic_set(&lock->count, 1);
atomic_long_set(&lock->owner, 0);
spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
#endif

debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

/*
* @owner: contains: 'struct task_struct *' to the current lock owner,
* NULL means not owned. Since task_struct pointers are aligned at
* ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
* bits to store extra state.
*
* Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
* Bit1 indicates unlock needs to hand the lock to the top-waiter
*/
#define MUTEX_FLAG_WAITERS 0x01
#define MUTEX_FLAG_HANDOFF 0x02

#define MUTEX_FLAGS 0x03

static inline struct task_struct *__owner_task(unsigned long owner)
{
return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
return owner & MUTEX_FLAGS;
}

/*
* Actual trylock that will work on any unlocked state.
*
* When setting the owner field, we must preserve the low flag bits.
*
* Be careful with @handoff, only set that in a wait-loop (where you set
* HANDOFF) to avoid recursive lock attempts.
*/
static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
{
unsigned long owner, curr = (unsigned long)current;

owner = atomic_long_read(&lock->owner);
for (;;) { /* must loop, can race against a flag */
unsigned long old, flags = __owner_flags(owner);

if (__owner_task(owner)) {
if (handoff && unlikely(__owner_task(owner) == current)) {
/*
* Provide ACQUIRE semantics for the lock-handoff.
*
* We cannot easily use load-acquire here, since
* the actual load is a failed cmpxchg, which
* doesn't imply any barriers.
*
* Also, this is a fairly unlikely scenario, and
* this contains the cost.
*/
smp_mb(); /* ACQUIRE */
return true;
}

return false;
}

/*
* We set the HANDOFF bit, we must make sure it doesn't live
* past the point where we acquire it. This would be possible
* if we (accidentally) set the bit on an unlocked mutex.
*/
if (handoff)
flags &= ~MUTEX_FLAG_HANDOFF;

old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
if (old == owner)
return true;

owner = old;
}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* Lockdep annotations are contained to the slow paths for simplicity.
* There is nothing that would stop spreading the lockdep annotations outwards
* except more code.
*/

/*
* Optimistic trylock that only works in the uncontended case. Make sure to
* follow with a __mutex_trylock() before failing.
*/
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
unsigned long curr = (unsigned long)current;

if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
return true;

return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
unsigned long curr = (unsigned long)current;

if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
return true;

return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
* Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
* semantics like a regular unlock, the __mutex_trylock() provides matching
* ACQUIRE semantics for the handoff.
*/
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
unsigned long owner = atomic_long_read(&lock->owner);

for (;;) {
unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

new = (owner & MUTEX_FLAG_WAITERS);
new |= (unsigned long)task;

old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
if (old == owner)
break;

owner = old;
}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* We split the mutex lock/unlock logic into separate fastpath and
@@ -69,7 +204,7 @@ EXPORT_SYMBOL(__mutex_init);
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
* mutex_lock - acquire the mutex

@@ -95,14 +230,10 @@ __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
void __sched mutex_lock(struct mutex *lock)
{
might_sleep();
/*
* The locking fastpath is the 1->0 transition from
* 'unlocked' into 'locked' state.
*/
__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
mutex_set_owner(lock);
}

if (!__mutex_trylock_fast(lock))
__mutex_lock_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

@@ -149,9 +280,6 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
/*
* After acquiring lock with fastpath or when we lost out in contested
* slowpath, set ctx and wake up any waiters so they can recheck.
*
* This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
* as the fastpath and opportunistic spinning are disabled in that case.
*/
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,

@@ -176,7 +304,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
/*
* Check if lock is contended, if not there is nobody to wake up
*/
if (likely(atomic_read(&lock->base.count) == 0))
if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
return;

/*

@@ -227,7 +355,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
bool ret = true;

rcu_read_lock();
while (lock->owner == owner) {
while (__mutex_owner(lock) == owner) {
/*
* Ensure we emit the owner->on_cpu, dereference _after_
* checking lock->owner still matches owner. If that fails,

@@ -236,12 +364,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
*/
barrier();

if (!owner->on_cpu || need_resched()) {
/*
* Use vcpu_is_preempted to detect lock holder preemption issue.
*/
if (!owner->on_cpu || need_resched() ||
vcpu_is_preempted(task_cpu(owner))) {
ret = false;
break;
}

cpu_relax_lowlatency();
cpu_relax();
}
rcu_read_unlock();
@@ -260,26 +392,24 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
return 0;

rcu_read_lock();
owner = READ_ONCE(lock->owner);
if (owner)
retval = owner->on_cpu;
rcu_read_unlock();
owner = __mutex_owner(lock);

/*
* if lock->owner is not set, the mutex owner may have just acquired
* it and not set the owner yet or the mutex has been released.
* As lock holder preemption issue, we both skip spinning if task is not
* on cpu or its cpu is preempted
*/
if (owner)
retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
rcu_read_unlock();

/*
* If lock->owner is not set, the mutex has been released. Return true
* such that we'll trylock in the spin path, which is a faster option
* than the blocking slow path.
*/
return retval;
}

/*
* Atomically try to take the lock when it is available
*/
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
return !mutex_is_locked(lock) &&
(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
}

/*
* Optimistic spinning.
*

@@ -288,13 +418,6 @@ static inline bool mutex_try_to_acquire(struct mutex *lock)
* need to reschedule. The rationale is that if the lock owner is
* running, it is likely to release the lock soon.
*
* Since this needs the lock owner, and this mutex implementation
* doesn't track the owner atomically in the lock field, we need to
* track it non-atomically.
*
* We can't do this for DEBUG_MUTEXES because that relies on wait_lock
* to serialize everything.
*
* The mutex spinners are queued up using MCS lock so that only one
* spinner can compete for the mutex. However, if mutex spinning isn't
* going to happen, there is no point in going through the lock/unlock

@@ -302,24 +425,39 @@ static inline bool mutex_try_to_acquire(struct mutex *lock)
*
* Returns true when the lock was taken, otherwise false, indicating
* that we need to jump to the slowpath and sleep.
*
* The waiter flag is set to true if the spinner is a waiter in the wait
* queue. The waiter-spinner will spin on the lock directly and concurrently
* with the spinner at the head of the OSQ, if present, until the owner is
* changed to itself.
*/
static bool mutex_optimistic_spin(struct mutex *lock,
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
struct ww_acquire_ctx *ww_ctx,
const bool use_ww_ctx, const bool waiter)
{
struct task_struct *task = current;

if (!mutex_can_spin_on_owner(lock))
goto done;
if (!waiter) {
/*
* The purpose of the mutex_can_spin_on_owner() function is
* to eliminate the overhead of osq_lock() and osq_unlock()
* in case spinning isn't possible. As a waiter-spinner
* is not going to take OSQ lock anyway, there is no need
* to call mutex_can_spin_on_owner().
*/
if (!mutex_can_spin_on_owner(lock))
goto fail;

/*
* In order to avoid a stampede of mutex spinners trying to
* acquire the mutex all at once, the spinners need to take a
* MCS (queued) lock first before spinning on the owner field.
*/
if (!osq_lock(&lock->osq))
goto done;
/*
* In order to avoid a stampede of mutex spinners trying to
* acquire the mutex all at once, the spinners need to take a
* MCS (queued) lock first before spinning on the owner field.
*/
if (!osq_lock(&lock->osq))
goto fail;
}

while (true) {
for (;;) {
struct task_struct *owner;

if (use_ww_ctx && ww_ctx->acquired > 0) {
@@ -335,40 +473,26 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* performed the optimistic spinning cannot be done.
*/
if (READ_ONCE(ww->ctx))
break;
goto fail_unlock;
}

/*
* If there's an owner, wait for it to either
* release the lock or go to sleep.
*/
owner = READ_ONCE(lock->owner);
if (owner && !mutex_spin_on_owner(lock, owner))
break;

/* Try to acquire the mutex if it is unlocked. */
if (mutex_try_to_acquire(lock)) {
lock_acquired(&lock->dep_map, ip);

if (use_ww_ctx) {
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);

ww_mutex_set_context_fastpath(ww, ww_ctx);
owner = __mutex_owner(lock);
if (owner) {
if (waiter && owner == task) {
smp_mb(); /* ACQUIRE */
break;
}

mutex_set_owner(lock);
osq_unlock(&lock->osq);
return true;
if (!mutex_spin_on_owner(lock, owner))
goto fail_unlock;
}

/*
* When there's no owner, we might have preempted between the
* owner acquiring the lock and setting the owner field. If
* we're an RT task that will live-lock because we won't let
* the owner complete.
*/
if (!owner && (need_resched() || rt_task(task)))
/* Try to acquire the mutex if it is unlocked. */
if (__mutex_trylock(lock, waiter))
break;

/*

@@ -377,11 +501,20 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* memory barriers as we'll eventually observe the right
* values at the cost of a few extra spins.
*/
cpu_relax_lowlatency();
cpu_relax();
}

osq_unlock(&lock->osq);
done:
if (!waiter)
osq_unlock(&lock->osq);

return true;

fail_unlock:
if (!waiter)
osq_unlock(&lock->osq);

fail:
/*
* If we fell out of the spin path because of need_resched(),
* reschedule now, before we try-lock the mutex. This avoids getting

@@ -400,14 +533,14 @@ done:
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
struct ww_acquire_ctx *ww_ctx,
const bool use_ww_ctx, const bool waiter)
{
return false;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
* mutex_unlock - release the mutex

@@ -422,21 +555,12 @@ void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
*/
void __sched mutex_unlock(struct mutex *lock)
{
/*
* The unlocking fastpath is the 0->1 transition from 'locked'
* into 'unlocked' state:
*/
#ifndef CONFIG_DEBUG_MUTEXES
/*
* When debugging is enabled we must not clear the owner before time,
* the slow path will always be taken, and that clears the owner field
* after verifying that it was indeed current.
*/
mutex_clear_owner(lock);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
if (__mutex_unlock_fast(lock))
return;
#endif
__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
__mutex_unlock_slowpath(lock, _RET_IP_);
}

EXPORT_SYMBOL(mutex_unlock);

/**

@@ -465,15 +589,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
lock->ctx = NULL;
}

#ifndef CONFIG_DEBUG_MUTEXES
/*
* When debugging is enabled we must not clear the owner before time,
* the slow path will always be taken, and that clears the owner field
* after verifying that it was indeed current.
*/
mutex_clear_owner(&lock->base);
#endif
__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
@@ -509,10 +625,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct task_struct *task = current;
struct mutex_waiter waiter;
unsigned long flags;
bool first = false;
struct ww_mutex *ww;
int ret;

if (use_ww_ctx) {
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
ww = container_of(lock, struct ww_mutex, base);
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
return -EALREADY;
}

@@ -520,20 +638,21 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
preempt_disable();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
if (__mutex_trylock(lock, false) ||
mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
/* got the lock, yay! */
lock_acquired(&lock->dep_map, ip);
if (use_ww_ctx)
ww_mutex_set_context_fastpath(ww, ww_ctx);
preempt_enable();
return 0;
}

spin_lock_mutex(&lock->wait_lock, flags);

/*
* Once more, try to acquire the lock. Only try-lock the mutex if
* it is unlocked to reduce unnecessary xchg() operations.
* After waiting to acquire the wait_lock, try again.
*/
if (!mutex_is_locked(lock) &&
(atomic_xchg_acquire(&lock->count, 0) == 1))
if (__mutex_trylock(lock, false))
goto skip_wait;

debug_mutex_lock_common(lock, &waiter);

@@ -543,26 +662,26 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;

if (__mutex_waiter_is_first(lock, &waiter))
__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

lock_contended(&lock->dep_map, ip);

set_task_state(task, state);
for (;;) {
/*
* Lets try to take the lock again - this is needed even if
* we get here for the first time (shortly after failing to
* acquire the lock), to make sure that we get a wakeup once
* it's unlocked. Later on, if we sleep, this is the
* operation that gives us the lock. We xchg it to -1, so
* that when we release the lock, we properly wake up the
* other waiters. We only attempt the xchg if the count is
* non-negative in order to avoid unnecessary xchg operations:
* Once we hold wait_lock, we're serialized against
* mutex_unlock() handing the lock off to us, do a trylock
* before testing the error conditions to make sure we pick up
* the handoff.
*/
if (atomic_read(&lock->count) >= 0 &&
(atomic_xchg_acquire(&lock->count, -1) == 1))
break;
if (__mutex_trylock(lock, first))
goto acquired;

/*
* got a signal? (This code gets eliminated in the
* TASK_UNINTERRUPTIBLE case.)
* Check for signals and wound conditions while holding
* wait_lock. This ensures the lock cancellation is ordered
* against mutex_unlock() and wake-ups do not go missing.
*/
if (unlikely(signal_pending_state(state, task))) {
ret = -EINTR;

@@ -575,36 +694,49 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err;
}

__set_task_state(task, state);

/* didn't get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
schedule_preempt_disabled();

if (!first && __mutex_waiter_is_first(lock, &waiter)) {
first = true;
__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
}

set_task_state(task, state);
/*
* Here we order against unlock; we must either see it change
* state back to RUNNING and fall through the next schedule(),
* or we must see its unlock and acquire.
*/
if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
__mutex_trylock(lock, first))
break;

spin_lock_mutex(&lock->wait_lock, flags);
}
spin_lock_mutex(&lock->wait_lock, flags);
acquired:
__set_task_state(task, TASK_RUNNING);

mutex_remove_waiter(lock, &waiter, task);
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
__mutex_clear_flag(lock, MUTEX_FLAGS);

debug_mutex_free_waiter(&waiter);

skip_wait:
/* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
mutex_set_owner(lock);

if (use_ww_ctx) {
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
if (use_ww_ctx)
ww_mutex_set_context_slowpath(ww, ww_ctx);
}

spin_unlock_mutex(&lock->wait_lock, flags);
preempt_enable();
return 0;

err:
__set_task_state(task, TASK_RUNNING);
mutex_remove_waiter(lock, &waiter, task);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);

@@ -631,7 +763,6 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched

@@ -650,7 +781,6 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
@@ -715,56 +845,66 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
/*
* Release the lock, slowpath:
*/
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
unsigned long flags;
WAKE_Q(wake_q);
struct task_struct *next = NULL;
unsigned long owner, flags;
DEFINE_WAKE_Q(wake_q);

mutex_release(&lock->dep_map, 1, ip);

/*
* As a performance measurement, release the lock before doing other
* wakeup related duties to follow. This allows other tasks to acquire
* the lock sooner, while still handling cleanups in past unlock calls.
* This can be done as we do not enforce strict equivalence between the
* mutex counter and wait_list.
* Release the lock before (potentially) taking the spinlock such that
* other contenders can get on with things ASAP.
*
*
* Some architectures leave the lock unlocked in the fastpath failure
* case, others need to leave it locked. In the later case we have to
* unlock it here - as the lock counter is currently 0 or negative.
* Except when HANDOFF, in that case we must not clear the owner field,
* but instead set it to the top waiter.
*/
if (__mutex_slowpath_needs_to_unlock())
atomic_set(&lock->count, 1);
owner = atomic_long_read(&lock->owner);
for (;;) {
unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
#endif

if (owner & MUTEX_FLAG_HANDOFF)
break;

old = atomic_long_cmpxchg_release(&lock->owner, owner,
__owner_flags(owner));
if (old == owner) {
if (owner & MUTEX_FLAG_WAITERS)
break;

return;
}

owner = old;
}

spin_lock_mutex(&lock->wait_lock, flags);
mutex_release(&lock->dep_map, nested, _RET_IP_);
debug_mutex_unlock(lock);

if (!list_empty(&lock->wait_list)) {
/* get the first entry from the wait-list: */
struct mutex_waiter *waiter =
list_entry(lock->wait_list.next,
struct mutex_waiter, list);
list_first_entry(&lock->wait_list,
struct mutex_waiter, list);

next = waiter->task;

debug_mutex_wake_waiter(lock, waiter);
wake_q_add(&wake_q, waiter->task);
wake_q_add(&wake_q, next);
}

if (owner & MUTEX_FLAG_HANDOFF)
__mutex_handoff(lock, next);

spin_unlock_mutex(&lock->wait_lock, flags);

wake_up_q(&wake_q);
}

/*
* Release the lock, slowpath:
*/
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);

__mutex_unlock_common_slowpath(lock, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* Here come the less common (and hence less performance-critical) APIs:

@@ -789,38 +929,30 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock);
*/
int __sched mutex_lock_interruptible(struct mutex *lock)
{
int ret;

might_sleep();
ret = __mutex_fastpath_lock_retval(&lock->count);
if (likely(!ret)) {
mutex_set_owner(lock);

if (__mutex_trylock_fast(lock))
return 0;
} else
return __mutex_lock_interruptible_slowpath(lock);

return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
int ret;

might_sleep();
ret = __mutex_fastpath_lock_retval(&lock->count);
if (likely(!ret)) {
mutex_set_owner(lock);

if (__mutex_trylock_fast(lock))
return 0;
} else
return __mutex_lock_killable_slowpath(lock);

return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);

__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
NULL, _RET_IP_, NULL, 0);
}

@@ -856,37 +988,6 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,

#endif

/*
* Spinlock based trylock, we take the spinlock and check whether we
* can get the lock:
*/
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
unsigned long flags;
int prev;

/* No need to trylock if the mutex is locked. */
if (mutex_is_locked(lock))
return 0;

spin_lock_mutex(&lock->wait_lock, flags);

prev = atomic_xchg_acquire(&lock->count, -1);
if (likely(prev == 1)) {
mutex_set_owner(lock);
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
}

/* Set it back to 0 if there are no waiters: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);

spin_unlock_mutex(&lock->wait_lock, flags);

return prev == 1;
}

/**
* mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired

@@ -903,13 +1004,12 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
*/
int __sched mutex_trylock(struct mutex *lock)
{
int ret;
bool locked = __mutex_trylock(lock, false);

ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
if (ret)
mutex_set_owner(lock);
if (locked)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

return ret;
return locked;
}
EXPORT_SYMBOL(mutex_trylock);

@@ -917,36 +1017,28 @@ EXPORT_SYMBOL(mutex_trylock);
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;

might_sleep();

ret = __mutex_fastpath_lock_retval(&lock->base.count);

if (likely(!ret)) {
if (__mutex_trylock_fast(&lock->base)) {
ww_mutex_set_context_fastpath(lock, ctx);
mutex_set_owner(&lock->base);
} else
ret = __ww_mutex_lock_slowpath(lock, ctx);
return ret;
return 0;
}

return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;

might_sleep();

ret = __mutex_fastpath_lock_retval(&lock->base.count);

if (likely(!ret)) {
if (__mutex_trylock_fast(&lock->base)) {
ww_mutex_set_context_fastpath(lock, ctx);
mutex_set_owner(&lock->base);
} else
ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
return ret;
return 0;
}

return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
@@ -16,32 +16,6 @@
#define mutex_remove_waiter(lock, waiter, task) \
__list_del((waiter)->list.prev, (waiter)->list.next)

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
* The mutex owner can get read and written to locklessly.
* We should use WRITE_ONCE when writing the owner value to
* avoid store tearing, otherwise, a thread could potentially
* read a partially written and incomplete owner value.
*/
static inline void mutex_set_owner(struct mutex *lock)
{
WRITE_ONCE(lock->owner, current);
}

static inline void mutex_clear_owner(struct mutex *lock)
{
WRITE_ONCE(lock->owner, NULL);
}
#else
static inline void mutex_set_owner(struct mutex *lock)
{
}

static inline void mutex_clear_owner(struct mutex *lock)
{
}
#endif

#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
@@ -21,6 +21,11 @@ static inline int encode_cpu(int cpu_nr)
return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
int cpu_nr = encoded_cpu_val - 1;

@@ -75,7 +80,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
break;
}

cpu_relax_lowlatency();
cpu_relax();
}

return next;

@@ -118,11 +123,13 @@ bool osq_lock(struct optimistic_spin_queue *lock)
while (!READ_ONCE(node->locked)) {
/*
* If we need to reschedule bail... so we can block.
* Use vcpu_is_preempted() to avoid waiting for a preempted
* lock holder:
*/
if (need_resched())
if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
goto unqueue;

cpu_relax_lowlatency();
cpu_relax();
}
return true;

@@ -148,7 +155,7 @@ unqueue:
if (smp_load_acquire(&node->locked))
return true;

cpu_relax_lowlatency();
cpu_relax();

/*
* Or we race against a concurrent unqueue()'s step-B, in which
@@ -54,7 +54,7 @@ static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
while ((cnts & _QW_WMASK) == _QW_LOCKED) {
cpu_relax_lowlatency();
cpu_relax();
cnts = atomic_read_acquire(&lock->cnts);
}
}

@@ -130,7 +130,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
(cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
break;

cpu_relax_lowlatency();
cpu_relax();
}

/* When no more readers, set the locked flag */

@@ -141,7 +141,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
_QW_LOCKED) == _QW_WAITING))
break;

cpu_relax_lowlatency();
cpu_relax();
}
unlock:
arch_spin_unlock(&lock->wait_lock);
@@ -1446,7 +1446,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
struct wake_q_head *wqh))
{
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
rt_mutex_deadlock_account_unlock(current);

@@ -1619,11 +1619,15 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
* proxy owner
*
* @lock: the rt_mutex to be locked
* @lock: the rt_mutex to be locked
* @proxy_owner:the task to set as owner
*
* No locking. Caller has to do serializing itself
* Special API call for PI-futex support
*
* Special API call for PI-futex support. This initializes the rtmutex and
* assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
* possible at this point because the pi_state which contains the rtmutex
* is not yet visible to other tasks.
*/
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)

@@ -1637,10 +1641,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
/**
* rt_mutex_proxy_unlock - release a lock on behalf of owner
*
* @lock: the rt_mutex to be locked
* @lock: the rt_mutex to be locked
*
* No locking. Caller has to do serializing itself
* Special API call for PI-futex support
*
* Special API call for PI-futex support. This merrily cleans up the rtmutex
* (debugging) state. Concurrent operations on this rt_mutex are not
* possible because it belongs to the pi_state which is about to be freed
* and it is not longer visible to other tasks.
*/
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner)
@@ -71,13 +71,12 @@ task_top_pi_waiter(struct task_struct *p)
* lock->owner state tracking:
*/
#define RT_MUTEX_HAS_WAITERS 1UL
#define RT_MUTEX_OWNER_MASKALL 1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
@@ -225,7 +225,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
struct rwsem_waiter waiter;
struct task_struct *tsk = current;
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

waiter.task = tsk;
waiter.type = RWSEM_WAITING_FOR_READ;

@@ -336,7 +336,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
goto done;
}

ret = owner->on_cpu;
/*
* As lock holder preemption issue, we both skip spinning if task is not
* on cpu or its cpu is preempted
*/
ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
done:
rcu_read_unlock();
return ret;

@@ -362,13 +366,17 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
*/
barrier();

/* abort spinning when need_resched or owner is not running */
if (!owner->on_cpu || need_resched()) {
/*
* abort spinning when need_resched or owner is not running or
* owner's cpu is preempted.
*/
if (!owner->on_cpu || need_resched() ||
vcpu_is_preempted(task_cpu(owner))) {
rcu_read_unlock();
return false;
}

cpu_relax_lowlatency();
cpu_relax();
}
rcu_read_unlock();
out:

@@ -423,7 +431,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
* memory barriers as we'll eventually observe the right
* values at the cost of a few extra spins.
*/
cpu_relax_lowlatency();
cpu_relax();
}
osq_unlock(&sem->osq);
done:

@@ -461,7 +469,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
bool waiting = true; /* any queued threads before us */
struct rwsem_waiter waiter;
struct rw_semaphore *ret = sem;
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

/* undo write bias from down_write operation, stop active locking */
count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

@@ -495,7 +503,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
* wake any read locks that were queued ahead of us.
*/
if (count > RWSEM_WAITING_BIAS) {
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
/*

@@ -571,7 +579,7 @@ __visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

/*
* If a spinner is present, it is not necessary to do the wakeup.

@@ -625,7 +633,7 @@ __visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
WAKE_Q(wake_q);
DEFINE_WAKE_Q(wake_q);

raw_spin_lock_irqsave(&sem->wait_lock, flags);
@@ -75,11 +75,11 @@
#include <linux/compiler.h>
#include <linux/frame.h>
#include <linux/prefetch.h>
#include <linux/mutex.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
@@ -194,7 +194,7 @@ static int multi_cpu_stop(void *data)
/* Simple state machine */
do {
/* Chill out and ensure we re-read multi_stop_state. */
cpu_relax();
cpu_relax_yield();
if (msdata->state != curstate) {
curstate = msdata->state;
switch (curstate) {