Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The locking tree was busier in this cycle than the usual pattern - a
  couple of major projects happened to coincide.

  The main changes are:

   - implement the atomic_fetch_{add,sub,and,or,xor}() API natively
     across all SMP architectures (Peter Zijlstra)

   - add atomic_fetch_{inc/dec}() as well, using the generic primitives
     (Davidlohr Bueso)

   - optimize various aspects of rwsems (Jason Low, Davidlohr Bueso,
     Waiman Long)

   - optimize smp_cond_load_acquire() on arm64 and implement LSE based
     atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
     on arm64 (Will Deacon)

   - introduce smp_acquire__after_ctrl_dep() and fix various barrier
     mis-uses and bugs (Peter Zijlstra)

   - after discovering ancient spin_unlock_wait() barrier bugs in its
     implementation and usage, strengthen its semantics and update/fix
     usage sites (Peter Zijlstra)

   - optimize mutex_trylock() fastpath (Peter Zijlstra)

   - ... misc fixes and cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
  locking/atomic: Introduce inc/dec variants for the atomic_fetch_$op() API
  locking/barriers, arch/arm64: Implement LDXR+WFE based smp_cond_load_acquire()
  locking/static_keys: Fix non static symbol Sparse warning
  locking/qspinlock: Use __this_cpu_dec() instead of full-blown this_cpu_dec()
  locking/atomic, arch/tile: Fix tilepro build
  locking/atomic, arch/m68k: Remove comment
  locking/atomic, arch/arc: Fix build
  locking/Documentation: Clarify limited control-dependency scope
  locking/atomic, arch/rwsem: Employ atomic_long_fetch_add()
  locking/atomic, arch/qrwlock: Employ atomic_fetch_add_acquire()
  locking/atomic, arch/mips: Convert to _relaxed atomics
  locking/atomic, arch/alpha: Convert to _relaxed atomics
  locking/atomic: Remove the deprecated atomic_{set,clear}_mask() functions
  locking/atomic: Remove linux/atomic.h:atomic_fetch_or()
  locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()
  locking/atomic: Fix atomic64_relaxed() bits
  locking/atomic, arch/xtensa: Implement atomic_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  locking/atomic, arch/sparc: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
  ...
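A note on the naming convention used throughout the series: atomic_##op##_return() returns the counter value after the operation, while the new atomic_fetch_##op() returns the value the counter held before it (on LL/SC architectures this is simply the value produced by the exclusive load). A minimal user-space sketch of the distinction, using C11 <stdatomic.h> rather than the kernel API:

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int v = ATOMIC_VAR_INIT(10);

		/* fetch-style: returns the old value, like the kernel's atomic_fetch_add(i, &v) */
		int old = atomic_fetch_add(&v, 5);		/* old == 10, v is now 15 */

		/* return-style: the kernel's atomic_add_return(i, &v) yields the value after
		 * the addition, i.e. it behaves like atomic_fetch_add(i, &v) + i */
		int new_val = atomic_fetch_add(&v, 5) + 5;	/* new_val == 20, v is now 20 */

		printf("old=%d new_val=%d final=%d\n", old, new_val, atomic_load(&v));
		return 0;
	}

The diff excerpt below shows one of the per-architecture conversions from this pull.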
@@ -110,7 +110,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
 	);							\
 }								\
 
-#define ATOMIC_OP_RETURN(op)					\
+#define ATOMIC_OP_RETURN(op)					\
 static inline int atomic_##op##_return(int i, atomic_t *v)	\
 {								\
 	int output;						\
@@ -127,16 +127,37 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return output;						\
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+{								\
+	int output, val;					\
+								\
+	__asm__ __volatile__ (					\
+		"1:	%0 = memw_locked(%2);\n"		\
+		"	%1 = "#op "(%0,%3);\n"			\
+		"	memw_locked(%2,P3)=%1;\n"		\
+		"	if !P3 jump 1b;\n"			\
+		: "=&r" (output), "=&r" (val)			\
+		: "r" (&v->counter), "r" (i)			\
+		: "memory", "p3"				\
+	);							\
+	return output;						\
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
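The hunks above appear to come from the Hexagon atomic.h: memw_locked is Hexagon's load-locked/store-conditional pair and P3 is the predicate register that receives the store-conditional result, hence the "p3" clobber. Expanded by hand for op=add, the new ATOMIC_FETCH_OP macro yields roughly the following (the comments are added here for explanation and are not in the source):

	static inline int atomic_fetch_add(int i, atomic_t *v)
	{
		int output, val;

		__asm__ __volatile__ (
			"1:	%0 = memw_locked(%2);\n"	/* load-locked: fetch the old value */
			"	%1 = add(%0,%3);\n"		/* compute the new value */
			"	memw_locked(%2,P3)=%1;\n"	/* store-conditional */
			"	if !P3 jump 1b;\n"		/* lost the reservation: retry */
			: "=&r" (output), "=&r" (val)
			: "r" (&v->counter), "r" (i)
			: "memory", "p3"
		);
		return output;	/* the value before the add, per the fetch_op contract */
	}

Unlike ATOMIC_OP_RETURN, which updates its single scratch register in place and returns the new value, ATOMIC_FETCH_OP needs the second scratch register (val) for the updated value so that the originally loaded value (output) can be returned untouched. The next hunks move to the spinlock side of the same pull.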
@@ -23,6 +23,8 @@
 #define _ASM_SPINLOCK_H
 
 #include <asm/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * This file is pulled in for SMP builds.
@@ -176,8 +178,12 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  * SMP spinlocks are intended to allow only a single CPU at the lock
  */
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(lock) \
-	do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	smp_cond_load_acquire(&lock->lock, !VAL);
+}
+
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
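These spinlock.h hunks implement the spin_unlock_wait() strengthening mentioned in the pull message: the old arch_spin_unlock_wait() was an open-coded busy loop over arch_spin_is_locked()/cpu_relax() with no ordering guarantee against the accesses that follow it, whereas smp_cond_load_acquire(&lock->lock, !VAL) spins until the loaded lock word satisfies the condition (VAL names the most recently loaded value inside the condition expression) and then provides ACQUIRE ordering. The added <asm/barrier.h> and <asm/processor.h> includes presumably pull in smp_cond_load_acquire() and the cpu_relax() its generic implementation uses. A rough user-space analogue with C11 atomics; cond_load_acquire_until_zero() is a hypothetical helper for illustration, not the kernel macro:

	#include <stdatomic.h>

	/*
	 * Spin with relaxed loads until the word reads zero, then upgrade to
	 * ACQUIRE ordering so later memory accesses cannot be reordered before
	 * the successful load -- roughly what smp_cond_load_acquire(ptr, !VAL)
	 * gives the caller of arch_spin_unlock_wait() above.
	 */
	static unsigned int cond_load_acquire_until_zero(atomic_uint *p)
	{
		unsigned int val;

		while ((val = atomic_load_explicit(p, memory_order_relaxed)) != 0)
			;	/* a real implementation would also relax the CPU here */

		atomic_thread_fence(memory_order_acquire);
		return val;
	}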