Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking/atomics update from Thomas Gleixner:
 "The locking, atomics and memory model brains delivered:

   - A larger update to the atomics code which reworks the ordering
     barriers, consolidates the atomic primitives, provides the new
     atomic64_fetch_add_unless() primitive and cleans up the include
     hell.

   - Simplify cmpxchg() instrumentation and add instrumentation for
     xchg() and cmpxchg_double().

   - Updates to the memory model and documentation"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
  locking/atomics: Rework ordering barriers
  locking/atomics: Instrument cmpxchg_double*()
  locking/atomics: Instrument xchg()
  locking/atomics: Simplify cmpxchg() instrumentation
  locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation
  tools/memory-model: Rename litmus tests to comply to norm7
  tools/memory-model/Documentation: Fix typo, smb->smp
  sched/Documentation: Update wake_up() & co. memory-barrier guarantees
  locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
  sched/core: Use smp_mb() in wake_woken_function()
  tools/memory-model: Add informal LKMM documentation to MAINTAINERS
  locking/atomics/Documentation: Describe atomic_set() as a write operation
  tools/memory-model: Make scripts executable
  tools/memory-model: Remove ACCESS_ONCE() from model
  tools/memory-model: Remove ACCESS_ONCE() from recipes
  locking/memory-barriers.txt/kokr: Update Korean translation to fix broken DMA vs. MMIO ordering example
  MAINTAINERS: Add Daniel Lustig as an LKMM reviewer
  tools/memory-model: Fix ISA2+pooncelock+pooncelock+pombonce name
  tools/memory-model: Add litmus test for full multicopy atomicity
  locking/refcount: Always allow checked forms
  ...
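The headline primitive, atomic64_fetch_add_unless(), adds @a to @v unless @v currently equals @u, and returns the value it observed beforehand. A minimal sketch of that contract (the sketch_ name is ours for illustration; the in-tree generic fallback may be worded differently):

#include <linux/atomic.h>

/* Add @a to @v unless @v == @u; return the pre-operation value of @v. */
static inline long long sketch_atomic64_fetch_add_unless(atomic64_t *v,
							 long long a,
							 long long u)
{
	long long c, old;

	c = atomic64_read(v);
	/* Retry until the cmpxchg sticks or the forbidden value @u is seen. */
	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}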
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -130,7 +130,7 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 }
 #define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int oldval, newval;
 	unsigned long tmp;
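The rename above is more than cosmetic: the old double-underscore name obscured that this function returns the pre-add value, which is exactly what the atomic_fetch_*() naming convention advertises. A hypothetical caller showing why the old value matters (sketch_get_unless_zero is our name, not an in-tree helper):

/* Take a reference unless the count already dropped to zero; the add
 * happened iff the returned old value was non-zero. */
static inline bool sketch_get_unless_zero(atomic_t *refs)
{
	return atomic_fetch_add_unless(refs, 1, 0) != 0;
}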
@@ -156,6 +156,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 	return oldval;
 }
+#define atomic_fetch_add_unless		atomic_fetch_add_unless
 
 #else /* ARM_ARCH_6 */
 
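The self-referential #define added in this hunk is how the generic header detects an architecture-provided implementation: <linux/atomic.h> supplies a fallback only when the macro is absent. A sketch of that header-internal idiom, assuming the v4.19-era layout (the fallback body shown is the classic cmpxchg loop and may not match the in-tree text exactly):

#ifndef atomic_fetch_add_unless
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	/* Generic fallback: a plain-C cmpxchg retry loop. */
	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)
			break;
		c = old;
	}
	return c;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif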
@@ -215,15 +216,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-
-	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
-		c = old;
-	return c;
-}
+#define atomic_fetch_andnot		atomic_fetch_andnot
 
 #endif /* __LINUX_ARM_ARCH__ */
 
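The pre-ARMv6 implementation deleted above is essentially the generic cmpxchg loop, which is why it can go entirely: with no arch definition present, the common header provides an identical fallback. The boolean atomic_add_unless() is likewise derived from the fetch primitive rather than implemented per architecture; a sketch of that derivation (names follow the v4.19-era generic header):

static inline bool sketch_atomic_add_unless(atomic_t *v, int a, int u)
{
	/* The add was performed iff the observed old value was not @u. */
	return atomic_fetch_add_unless(v, a, u) != u;
}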
@@ -254,17 +247,6 @@ ATOMIC_OPS(xor, ^=, eor)
 
 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
 
-#define atomic_inc(v)		atomic_add(1, v)
-#define atomic_dec(v)		atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return_relaxed(v)	(atomic_add_return_relaxed(1, v))
-#define atomic_dec_return_relaxed(v)	(atomic_sub_return_relaxed(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
 	long long counter;
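Every macro removed in this hunk merely restated a default; after the consolidation the generic header derives these ops whenever the architecture stays silent, and only ops with a genuinely better ARM encoding remain here. The fallback pattern, sketched under the assumption of the v4.19-era <linux/atomic.h> (exact in-tree text may differ):

#ifndef atomic_inc
#define atomic_inc(v)			atomic_add(1, (v))
#endif

#ifndef atomic_inc_and_test
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
#endif

#ifndef atomic_add_negative
#define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#endif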
@@ -494,12 +476,13 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 
 	return result;
 }
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+						  long long u)
 {
-	long long val;
+	long long oldval, newval;
 	unsigned long tmp;
-	int ret = 1;
 
 	smp_mb();
 	prefetchw(&v->counter);
@@ -508,33 +491,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 "1:	ldrexd	%0, %H0, [%4]\n"
 "	teq	%0, %5\n"
 "	teqeq	%H0, %H5\n"
-"	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%Q0, %Q0, %Q6\n"
-"	adc	%R0, %R0, %R6\n"
-"	strexd	%2, %0, %H0, [%4]\n"
+"	adds	%Q1, %Q0, %Q6\n"
+"	adc	%R1, %R0, %R6\n"
+"	strexd	%2, %1, %H1, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
 "2:"
-	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "r" (u), "r" (a)
 	: "cc");
 
-	if (ret)
+	if (oldval != u)
 		smp_mb();
 
-	return ret;
+	return oldval;
 }
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
-#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 #endif
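The assembly change above mirrors the C-level rename: the old loop accumulated the sum in place (%0) and returned a separate boolean ret (cleared by the moveq when @u was seen), whereas the new loop keeps the observed value in oldval (%0), builds the sum in newval (%1), and returns oldval, still issuing the trailing smp_mb() only when the add actually happened. The boolean helpers that the deleted atomic64_* macros used to provide are then recovered generically; a sketch with our hypothetical sketch_ names, modeled on the v4.19-era generic header:

static inline bool sketch_atomic64_add_unless(atomic64_t *v, long long a,
					      long long u)
{
	return atomic64_fetch_add_unless(v, a, u) != u;
}

/* inc_not_zero is just add_unless(v, 1, 0). */
#define sketch_atomic64_inc_not_zero(v)	\
	sketch_atomic64_add_unless((v), 1LL, 0LL)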