s390/atomic: refactor atomic primitives
Rework atomic.h to make the low-level functions available for use in
other headers without using atomic_t, e.g. in bitops.h.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
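Note: the refactoring works because the new asm/atomic_ops.h helpers take plain int/long pointers instead of atomic_t. Only the helper names and call signatures are confirmed by the hunks below; as a rough sketch of what such a helper presumably looks like on z196+ (illustrative, not the patch's exact macro-generated code):

/*
 * Illustrative sketch of an atomic_ops.h style helper, assuming the
 * z196 load-and-add instruction used by the code removed below.
 * The real header presumably generates these functions from a macro.
 */
static inline int __atomic_add_barrier(int val, int *ptr)
{
	int old;

	asm volatile(
		"	laa	%[old],%[val],%[ptr]\n"
		"	bcr	14,0\n"		/* serializing barrier */
		: [old] "=d" (old), [ptr] "+Q" (*ptr)
		: [val] "d" (val)
		: "cc", "memory");
	return old;
}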
@@ -1,13 +1,8 @@
 /*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999, 2016
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	      Denis Joseph Barrow,
- *	      Arnd Bergmann <arndb@de.ibm.com>,
- *
- * Atomic operations that C can't guarantee us.
- * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP environment.
- *
+ *	      Arnd Bergmann,
  */
 
 #ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __ATOMIC_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR	"lao"
-#define __ATOMIC_AND	"lan"
-#define __ATOMIC_ADD	"laa"
-#define __ATOMIC_XOR	"lax"
-#define __ATOMIC_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	int old_val;							\
-									\
-	typecheck(atomic_t *, ptr);					\
-	asm volatile(							\
-		op_string "	%0,%2,%1\n"				\
-		__barrier						\
-		: "=d" (old_val), "+Q" ((ptr)->counter)			\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR	"or"
-#define __ATOMIC_AND	"nr"
-#define __ATOMIC_ADD	"ar"
-#define __ATOMIC_XOR	"xr"
-#define __ATOMIC_BARRIER "\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	int old_val, new_val;						\
-									\
-	typecheck(atomic_t *, ptr);					\
-	asm volatile(							\
-		"	l	%0,%2\n"				\
-		"0:	lr	%1,%0\n"				\
-		op_string "	%1,%3\n"				\
-		"	cs	%0,%1,%2\n"				\
-		"	jl	0b"					\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
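With the loops factored out, a header like bitops.h can apply the same operations to a bare integer, which the old atomic_t-typed __ATOMIC_LOOP forbade via typecheck(). A hypothetical caller (set_mask() is made up for illustration):

/* Hypothetical caller: OR a mask into a plain int, no atomic_t. */
static inline void set_mask(int mask, int *word)
{
	__atomic_or(mask, word);
}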
@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+	return __atomic_add_barrier(i, &v->counter) + i;
 }
 
 static inline int atomic_fetch_add(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+	return __atomic_add_barrier(i, &v->counter);
 }
 
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"asi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+	__atomic_add(i, &v->counter);
 }
 
 #define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
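The constant fast path is preserved: for compile-time constants in -128..127 (the signed 8-bit immediate range of asi, matching the range check kept above), the add needs no fetch-and-add at all. A sketch of __atomic_add_const consistent with the asm this hunk removes (the body is an assumption; only the name comes from the diff):

/* Assumed body, mirroring the "asi" asm removed from atomic_add(). */
static inline void __atomic_add_const(int val, int *ptr)
{
	asm volatile(
		"	asi	%[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr)
		: [val] "i" (val)
		: "cc", "memory");
}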
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-#define ATOMIC_OPS(op, OP)						\
+#define ATOMIC_OPS(op)							\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
+	__atomic_##op(i, &v->counter);					\
 }									\
 static inline int atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
-	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);	\
+	return __atomic_##op##_barrier(i, &v->counter);			\
 }
 
-ATOMIC_OPS(and, AND)
-ATOMIC_OPS(or, OR)
-ATOMIC_OPS(xor, XOR)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
 
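For reference, ATOMIC_OPS(or) now expands to roughly:

/* Approximate preprocessor expansion of ATOMIC_OPS(or). */
static inline void atomic_or(int i, atomic_t *v)
{
	__atomic_or(i, &v->counter);
}
static inline int atomic_fetch_or(int i, atomic_t *v)
{
	return __atomic_or_barrier(i, &v->counter);
}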
@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	asm volatile(
-		"	cs	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic_cmpxchg(&v->counter, old, new);
 }
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
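__atomic_cmpxchg presumably wraps the same compare-and-swap instruction; a sketch mirroring the removed asm (only the name and the argument order, pointer then old then new, are confirmed by the hunk):

/* Sketch: "cs" stores new iff *ptr == old; the prior value lands in old. */
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	asm volatile(
		"	cs	%[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old;
}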
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-#undef __ATOMIC_LOOP
-
 #define ATOMIC64_INIT(i)  { (i) }
 
-#define __ATOMIC64_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR	"laog"
-#define __ATOMIC64_AND	"lang"
-#define __ATOMIC64_ADD	"laag"
-#define __ATOMIC64_XOR	"laxg"
-#define __ATOMIC64_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	long long old_val;						\
-									\
-	typecheck(atomic64_t *, ptr);					\
-	asm volatile(							\
-		op_string "	%0,%2,%1\n"				\
-		__barrier						\
-		: "=d" (old_val), "+Q" ((ptr)->counter)			\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR	"ogr"
-#define __ATOMIC64_AND	"ngr"
-#define __ATOMIC64_ADD	"agr"
-#define __ATOMIC64_XOR	"xgr"
-#define __ATOMIC64_BARRIER "\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	long long old_val, new_val;					\
-									\
-	typecheck(atomic64_t *, ptr);					\
-	asm volatile(							\
-		"	lg	%0,%2\n"				\
-		"0:	lgr	%1,%0\n"				\
-		op_string "	%1,%3\n"				\
-		"	csg	%0,%1,%2\n"				\
-		"	jl	0b"					\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline long long atomic64_read(const atomic64_t *v)
+static inline long atomic64_read(const atomic64_t *v)
 {
-	long long c;
+	long c;
 
 	asm volatile(
 		"	lg	%0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return c;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, long i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
 		: "=Q" (v->counter) : "d" (i));
 }
 
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+	return __atomic64_add_barrier(i, &v->counter) + i;
 }
 
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+	return __atomic64_add_barrier(i, &v->counter);
 }
 
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"agsi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic64_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+	__atomic64_add(i, &v->counter);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long long atomic64_cmpxchg(atomic64_t *v,
-					 long long old, long long new)
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 {
-	asm volatile(
-		"	csg	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic64_cmpxchg(&v->counter, old, new);
 }
 
-#define ATOMIC64_OPS(op, OP)						\
+#define ATOMIC64_OPS(op)						\
 static inline void atomic64_##op(long i, atomic64_t *v)		\
 {									\
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+	__atomic64_##op(i, &v->counter);				\
 }									\
 static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
 {									\
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
+	return __atomic64_##op##_barrier(i, &v->counter);		\
 }
 
-ATOMIC64_OPS(and, AND)
-ATOMIC64_OPS(or, OR)
-ATOMIC64_OPS(xor, XOR)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
-#undef __ATOMIC64_LOOP
 
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
 {
-	long long c, old;
+	long c, old;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 	return c != u;
 }
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long c, old, dec;
+	long c, old, dec;
 
 	c = atomic64_read(v);
 	for (;;) {
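The loop bodies of atomic64_add_unless() and atomic64_dec_if_positive() fall outside the hunk context; both follow the usual cmpxchg retry pattern, along these lines (illustrative reconstruction, not text from the patch):

	/* Illustrative retry loop in the style of dec_if_positive. */
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;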
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_inc(_v)		atomic64_add(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
 #define atomic64_dec(_v)		atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
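Closing usage note: the sub family is derived from add by negating the operand, now cast to -(long) rather than -(long long). An illustrative caller (not part of the patch):

/* Illustrative caller, not part of the patch. */
static atomic64_t bytes_in_flight = ATOMIC64_INIT(0);

static void account(long sent, long acked)
{
	atomic64_add(sent, &bytes_in_flight);
	atomic64_sub(acked, &bytes_in_flight);	/* atomic64_add(-(long)(acked), ...) */
}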