Merge branch 'locking/arch-atomic' into locking/core, because the topic is ready

Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Ingo Molnar
2016-07-07 09:12:02 +02:00
Commit 36e91aa262
52 files changed, 2518 insertions(+), 689 deletions(-)

@@ -93,7 +93,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
          * that accesses can't leak upwards out of our subsequent critical
          * section in the case that the lock is currently held for write.
          */
-        cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+        cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
         rspin_until_writer_unlock(lock, cnts);
         /*

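The change above relies on the new fetch-style atomics: atomic_fetch_add_acquire() returns the counter value from before the addition, so the old pattern of calling atomic_add_return_acquire() and then subtracting _QR_BIAS back out collapses into a single call. A minimal userspace sketch of the same return-the-old-value semantics, using the C11 <stdatomic.h> analogue of the kernel API (QR_BIAS is an illustrative constant here, not the kernel's _QR_BIAS):

#include <stdatomic.h>
#include <stdio.h>

#define QR_BIAS 0x100        /* illustrative reader bias, not the kernel's _QR_BIAS */

int main(void)
{
        atomic_uint cnts = 0x3;        /* pretend some writer/pending bits are set */

        /*
         * C11's atomic_fetch_add_explicit() returns the value the object
         * held *before* the add, exactly what the kernel's
         * atomic_fetch_add_acquire() provides.  The old kernel idiom,
         * atomic_add_return_acquire(bias, v) - bias, recovered the same
         * pre-add value by taking the *post*-add result and undoing the add.
         */
        unsigned int old = atomic_fetch_add_explicit(&cnts, QR_BIAS,
                                                     memory_order_acquire);

        printf("pre-add value: %#x, counter now: %#x\n",
               old, atomic_load_explicit(&cnts, memory_order_relaxed));
        return 0;
}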

@@ -112,12 +112,12 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 #else /* _Q_PENDING_BITS == 8 */
 static __always_inline void set_pending(struct qspinlock *lock)
 {
-        atomic_set_mask(_Q_PENDING_VAL, &lock->val);
+        atomic_or(_Q_PENDING_VAL, &lock->val);
 }
 
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
-        atomic_clear_mask(_Q_PENDING_VAL, &lock->val);
+        atomic_andnot(_Q_PENDING_VAL, &lock->val);
 }
 
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)

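Here the legacy atomic_set_mask()/atomic_clear_mask() helpers give way to the uniformly named atomic_or()/atomic_andnot() bitwise atomics; the semantics are unchanged (atomically OR a mask in, atomically AND with its complement). A sketch of the same pair in C11 terms: atomic_andnot() has no direct C11 counterpart, so atomic_fetch_and of the complemented mask stands in, and PENDING_VAL is an illustrative bit, not _Q_PENDING_VAL:

#include <stdatomic.h>
#include <stdio.h>

#define PENDING_VAL 0x100        /* illustrative pending bit, not _Q_PENDING_VAL */

static atomic_uint val = 0xff;        /* pretend the locked byte is nonzero */

static void set_pending(void)
{
        /* kernel: atomic_or(_Q_PENDING_VAL, &lock->val); */
        atomic_fetch_or_explicit(&val, PENDING_VAL, memory_order_relaxed);
}

static void clear_pending(void)
{
        /* kernel: atomic_andnot(_Q_PENDING_VAL, &lock->val);
         * i.e. atomically AND with the complement of the mask. */
        atomic_fetch_and_explicit(&val, ~PENDING_VAL, memory_order_relaxed);
}

int main(void)
{
        set_pending();
        printf("after set_pending:   %#x\n", atomic_load(&val));        /* 0x1ff */
        clear_pending();
        printf("after clear_pending: %#x\n", atomic_load(&val));        /* 0xff */
        return 0;
}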

@@ -153,7 +153,7 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
         if (wake_type != RWSEM_WAKE_READ_OWNED) {
                 adjustment = RWSEM_ACTIVE_READ_BIAS;
  try_reader_grant:
-                oldcount = atomic_long_add_return(adjustment, &sem->count) - adjustment;
+                oldcount = atomic_long_fetch_add(adjustment, &sem->count);
                 if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                         /*
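
The rwsem hunk follows the same fetch pattern as the qrwlock one: the reader-grant check wants the count as it was before the reader bias went in, and atomic_long_fetch_add() hands that back directly instead of add_return() minus the adjustment. A self-contained sketch of that grant-or-back-out pattern, with made-up bias constants rather than the kernel's RWSEM_* values and a simplified try_reader_grant() that only illustrates the old-value check:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative bias values, not the kernel's RWSEM_* constants. */
#define ACTIVE_READ_BIAS        1L
#define WAITING_BIAS            (-100000L)

static atomic_long count = 0;

static bool try_reader_grant(void)
{
        /* Add the reader bias and look at the count as it was *before*
         * the add: one atomic op instead of add_return() - bias. */
        long oldcount = atomic_fetch_add_explicit(&count, ACTIVE_READ_BIAS,
                                                  memory_order_relaxed);
        if (oldcount < WAITING_BIAS) {
                /* A writer got in first: back the bias out and give up. */
                atomic_fetch_sub_explicit(&count, ACTIVE_READ_BIAS,
                                          memory_order_relaxed);
                return false;
        }
        return true;
}

int main(void)
{
        printf("grant with idle sem:  %d\n", try_reader_grant());        /* 1 */
        atomic_store(&count, WAITING_BIAS - 1);        /* pretend a writer is active */
        printf("grant with writer in: %d\n", try_reader_grant());        /* 0 */
        return 0;
}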