locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
By defining our SMP atomics in terms of relaxed operations, we gain a small reduction in code size and have acquire/release/fence variants generated automatically by the core code.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-9-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
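Editor's note on the mechanism: the "generated automatically by the core code" part refers to the generic atomic headers, which synthesise the ordered variants around an architecture's _relaxed() primitive whenever the ordered form is not defined. A minimal sketch of that scheme, modelled on the __atomic_op_acquire()/__atomic_op_release()/__atomic_op_fence() wrappers in include/linux/atomic.h (simplified here, not a verbatim copy of the header):

/* Sketch: synthesise ordered atomics from a _relaxed() primitive.
 * If an arch defines only atomic_add_return_relaxed(), the core
 * code builds the ordered variants by bracketing it in barriers. */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();	/* later accesses stay after the op */	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic(); /* earlier accesses stay before it */	\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

/* e.g. when only the _relaxed form is provided by the arch: */
#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif

This is why the smp_mb() pairs can disappear from the ARM assembly in the diff below: the fully ordered atomic_add_return() still ends up as the relaxed body bracketed by the same barriers, built once in common code rather than open-coded per architecture.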
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
@@ -75,17 +74,17 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 	: "r" (&v->counter), "Ir" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
 	int oldval;
 	unsigned long res;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	do {
@@ -99,10 +98,9 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 		: "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -297,12 +295,12 @@ static inline void atomic64_##op(long long i, atomic64_t *v)	\
 }									\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long						\
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
 {									\
 	long long result;						\
 	unsigned long tmp;						\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
@@ -316,8 +314,6 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 	: "r" (&v->counter), "r" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }
 
@@ -328,6 +324,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+
 #define atomic64_andnot atomic64_andnot
 
 ATOMIC64_OP(and, and, and)
@@ -339,13 +338,12 @@ ATOMIC64_OP(xor, eor, eor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
-					 long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 {
 	long long oldval;
 	unsigned long res;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	do {
@@ -360,17 +358,15 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 		: "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 {
 	long long result;
 	unsigned long tmp;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
@@ -382,10 +378,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
-	smp_mb();
-
 	return result;
 }
+#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
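Editor's note: the same construction can be tried outside the kernel tree. A freestanding user-space sketch (not from this patch) using the GCC/Clang __atomic builtins, with __atomic_thread_fence(__ATOMIC_SEQ_CST) standing in for smp_mb():

#include <stdio.h>

static int counter;

/* Relaxed primitive: atomicity only, no ordering guarantees. */
static inline int add_return_relaxed(int i)
{
	return __atomic_add_fetch(&counter, i, __ATOMIC_RELAXED);
}

/* Fully ordered variant built from the relaxed one, mirroring the
 * kernel's __atomic_op_fence() scheme. */
static inline int add_return(int i)
{
	int ret;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_mb() analogue */
	ret = add_return_relaxed(i);
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_mb() analogue */
	return ret;
}

int main(void)
{
	printf("%d\n", add_return(5));	/* prints 5 */
	return 0;
}

On ARMv7 the relaxed builtin compiles to roughly the same ldrex/strex loop as the kernel macros above, and each fence to a dmb ish, the instruction smp_mb() expands to there — which is exactly the pairing this patch moves out of the per-operation assembly and into common code.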