locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations
By defining our SMP atomics in terms of relaxed operations, we gain a small reduction in code size and have acquire/release/fence variants generated automatically by the core code.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-9-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
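For context, the "core code" here is the generic atomic layer: when an architecture supplies only the _relaxed() form of an operation, the generic headers wrap it in barriers to build the acquire, release and fully ordered variants. The following is a simplified sketch of that pattern, not a copy of the headers; the macro names (__op_acquire/__op_release/__op_fence) are paraphrased and the real code uses smp_mb__before_atomic()/smp_mb__after_atomic() hooks.

/*
 * Simplified sketch: deriving ordered variants from an architecture's
 * _relaxed() primitive.  Kernel-style GNU C (statement expressions,
 * token pasting); barrier names are illustrative.
 */
#define __op_acquire(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb();	/* later accesses stay after the operation */	\
	__ret;								\
})

#define __op_release(op, args...)					\
({									\
	smp_mb();	/* earlier accesses stay before the operation */\
	op##_relaxed(args);						\
})

#define __op_fence(op, args...)						\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb();							\
	__ret = op##_relaxed(args);					\
	smp_mb();							\
	__ret;								\
})

/*
 * With xchg_relaxed() provided by the architecture (as in this patch),
 * the generic layer can then supply the ordered form, along the lines of:
 *
 *	#ifndef xchg
 *	#define xchg(...)	__op_fence(xchg, __VA_ARGS__)
 *	#endif
 */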
arch/arm/include/asm/cmpxchg.h

@@ -35,7 +35,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	unsigned int tmp;
 #endif
 
-	smp_mb();
 	prefetchw((const void *)ptr);
 
 	switch (size) {
@@ -98,12 +97,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
-	smp_mb();
 
 	return ret;
 }
 
-#define xchg(ptr, x) ({						\
+#define xchg_relaxed(ptr, x) ({					\
 	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),	\
 				   sizeof(*(ptr)));		\
 })
@@ -117,6 +115,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #error "SMP is not supported on this platform"
 #endif
 
+#define xchg xchg_relaxed
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -194,23 +194,11 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return oldval;
 }
 
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-					 unsigned long new, int size)
-{
-	unsigned long ret;
-
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg(ptr,o,n) ({					\
-	(__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
-					 (unsigned long)(o),	\
-					 (unsigned long)(n),	\
-					 sizeof(*(ptr)));	\
+#define cmpxchg_relaxed(ptr,o,n) ({				\
+	(__typeof__(*(ptr)))__cmpxchg((ptr),			\
+				      (unsigned long)(o),	\
+				      (unsigned long)(n),	\
+				      sizeof(*(ptr)));		\
 })
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
 
 #define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
 
-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr, o, n) ({					\
-	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr),		\
-					   (unsigned long long)(o),	\
-					   (unsigned long long)(n));	\
-})
-
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
 #endif /* __ASM_ARM_CMPXCHG_H */
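The removed __cmpxchg_mb()/__cmpxchg64_mb() wrappers did nothing more than bracket the relaxed primitive with smp_mb(); that bracketing is exactly what the generic layer now applies. A small stand-alone user-space illustration of the same layering (not kernel code; it uses the GCC/Clang __atomic builtins in place of the kernel's primitives and barriers):

/*
 * demo.c - fully ordered exchange composed as barrier + relaxed op + barrier,
 * mirroring the bracketing the removed __cmpxchg_mb()-style wrappers did.
 * Build with: cc -O2 demo.c
 */
#include <stdio.h>

static inline unsigned long xchg_relaxed_demo(unsigned long *p, unsigned long v)
{
	/* Relaxed atomic exchange: atomic, but imposes no ordering. */
	return __atomic_exchange_n(p, v, __ATOMIC_RELAXED);
}

static inline unsigned long xchg_demo(unsigned long *p, unsigned long v)
{
	unsigned long ret;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_mb() analogue */
	ret = xchg_relaxed_demo(p, v);
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_mb() analogue */

	return ret;
}

int main(void)
{
	unsigned long x = 1;
	unsigned long old = xchg_demo(&x, 2);

	printf("old=%lu new=%lu\n", old, x);	/* prints: old=1 new=2 */
	return 0;
}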