locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics

The qrwlock implementation is slightly heavy in its use of memory
barriers, mainly through the use of _cmpxchg() and _return() atomics, which
imply full barrier semantics.

This patch modifies the qrwlock code to use the more relaxed atomic
routines so that we can reduce the unnecessary barrier overhead on
weakly-ordered architectures.
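
For reference, a minimal user-space sketch of the ordering this relies on, written
with C11 <stdatomic.h> rather than the kernel atomic API (cnts, QR_BIAS and
QW_WMASK below are hypothetical stand-ins for lock->cnts, _QR_BIAS and _QW_WMASK):
taking the lock only needs ACQUIRE ordering on the update that observes the lock
state, and dropping it only needs RELEASE ordering, instead of the full barrier
implied by the default cmpxchg/_return atomics.

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_uint cnts;		/* stand-in for lock->cnts */
	#define QR_BIAS  0x100u			/* stand-in for _QR_BIAS   */
	#define QW_WMASK 0x0ffu			/* stand-in for _QW_WMASK  */

	/* Reader fast path: ACQUIRE is enough to order the critical section
	 * after the increment that observes no writer. */
	static bool read_trylock_sketch(void)
	{
		unsigned int c;

		c = atomic_fetch_add_explicit(&cnts, QR_BIAS,
					      memory_order_acquire) + QR_BIAS;
		if (!(c & QW_WMASK))
			return true;
		/* Failed attempt: undo the bias, no ordering required. */
		atomic_fetch_sub_explicit(&cnts, QR_BIAS, memory_order_relaxed);
		return false;
	}

	/* Reader unlock: RELEASE keeps critical-section accesses before the
	 * decrement, so no separate full barrier is needed. */
	static void read_unlock_sketch(void)
	{
		atomic_fetch_sub_explicit(&cnts, QR_BIAS, memory_order_release);
	}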

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-7-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Will Deacon
AuthorDate: 2015-08-06 17:54:42 +01:00
Committer: Ingo Molnar
Parent: 2b2a85a4d3
Commit: 77e430e3e4

2 files changed, 18 insertions(+), 19 deletions(-)


@@ -68,7 +68,7 @@ static inline int queued_read_trylock(struct qrwlock *lock)
 	cnts = atomic_read(&lock->cnts);
 	if (likely(!(cnts & _QW_WMASK))) {
-		cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 		if (likely(!(cnts & _QW_WMASK)))
 			return 1;
 		atomic_sub(_QR_BIAS, &lock->cnts);
@@ -89,8 +89,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg(&lock->cnts,
-				     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_cmpxchg_acquire(&lock->cnts,
+					     cnts, cnts | _QW_LOCKED) == cnts);
 }
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -100,7 +100,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
 {
 	u32 cnts;
 
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 	if (likely(!(cnts & _QW_WMASK)))
 		return;
@@ -115,7 +115,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
 static inline void queued_write_lock(struct qrwlock *lock)
 {
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 		return;
 
 	queued_write_lock_slowpath(lock);
@@ -130,8 +130,7 @@ static inline void queued_read_unlock(struct qrwlock *lock)
 	/*
 	 * Atomically decrement the reader count
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QR_BIAS, &lock->cnts);
+	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
 /**