locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()

Implement FETCH-OP atomic primitives; these are very similar to the
existing OP-RETURN primitives we already have, except that they return
the value of the atomic variable _before_ modification.
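
To make the distinction concrete, here is a minimal sketch using C11
<stdatomic.h> as a stand-in for the kernel API (the example_* wrappers
are invented for illustration and are not part of this patch):

  #include <stdatomic.h>

  static atomic_int counter;

  /* OP-RETURN flavour: yields the value *after* the addition. */
  static int example_add_return(int i)
  {
          return atomic_fetch_add(&counter, i) + i;
  }

  /* FETCH-OP flavour: yields the value *before* the addition. */
  static int example_fetch_add(int i)
  {
          return atomic_fetch_add(&counter, i);
  }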

This is especially useful for irreversible operations such as bitops,
because the state prior to the modification cannot be reconstructed
from the result of the operation alone.
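
For instance, once a bit has been OR-ed in, its old value is lost, so
only a fetch-style primitive can tell the caller whether it was the one
that actually set it. A minimal C11 sketch (claim_bit is an invented
name, not code from this patch):

  #include <stdatomic.h>
  #include <stdbool.h>

  static atomic_ulong flags;

  /* Returns true only for the caller that actually set the bit. */
  static bool claim_bit(unsigned int bit)
  {
          unsigned long old = atomic_fetch_or(&flags, 1UL << bit);

          return !(old & (1UL << bit));
  }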

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author:     Peter Zijlstra
Date:       2016-04-18 01:16:03 +02:00
Committer:  Ingo Molnar
Parent:     3a1adb23a5
Commit:     1af5de9af1

6 changed files with 164 additions and 99 deletions

arch/tile/lib/atomic_32.c

@@ -88,29 +88,29 @@ int _atomic_cmpxchg(int *v, int o, int n)
 }
 EXPORT_SYMBOL(_atomic_cmpxchg);
 
-unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
 {
-        return __atomic_or((int *)p, __atomic_setup(p), mask).val;
+        return __atomic_fetch_or((int *)p, __atomic_setup(p), mask).val;
 }
-EXPORT_SYMBOL(_atomic_or);
+EXPORT_SYMBOL(_atomic_fetch_or);
 
-unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
 {
-        return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+        return __atomic_fetch_and((int *)p, __atomic_setup(p), mask).val;
 }
-EXPORT_SYMBOL(_atomic_and);
+EXPORT_SYMBOL(_atomic_fetch_and);
 
-unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
 {
-        return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
+        return __atomic_fetch_andn((int *)p, __atomic_setup(p), mask).val;
 }
-EXPORT_SYMBOL(_atomic_andn);
+EXPORT_SYMBOL(_atomic_fetch_andn);
 
-unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
 {
-        return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
+        return __atomic_fetch_xor((int *)p, __atomic_setup(p), mask).val;
 }
-EXPORT_SYMBOL(_atomic_xor);
+EXPORT_SYMBOL(_atomic_fetch_xor);
 
 long long _atomic64_xchg(long long *v, long long n)
@@ -142,23 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
-long long _atomic64_and(long long *v, long long n)
+long long _atomic64_fetch_and(long long *v, long long n)
 {
-        return __atomic64_and(v, __atomic_setup(v), n);
+        return __atomic64_fetch_and(v, __atomic_setup(v), n);
 }
-EXPORT_SYMBOL(_atomic64_and);
+EXPORT_SYMBOL(_atomic64_fetch_and);
 
-long long _atomic64_or(long long *v, long long n)
+long long _atomic64_fetch_or(long long *v, long long n)
 {
-        return __atomic64_or(v, __atomic_setup(v), n);
+        return __atomic64_fetch_or(v, __atomic_setup(v), n);
 }
-EXPORT_SYMBOL(_atomic64_or);
+EXPORT_SYMBOL(_atomic64_fetch_or);
 
-long long _atomic64_xor(long long *v, long long n)
+long long _atomic64_fetch_xor(long long *v, long long n)
 {
-        return __atomic64_xor(v, __atomic_setup(v), n);
+        return __atomic64_fetch_xor(v, __atomic_setup(v), n);
 }
-EXPORT_SYMBOL(_atomic64_xor);
+EXPORT_SYMBOL(_atomic64_fetch_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in

arch/tile/lib/atomic_asm_32.S

@@ -177,10 +177,10 @@ atomic_op _xchg, 32, "move r24, r2"
 atomic_op _xchg_add, 32, "add r24, r22, r2"
 atomic_op _xchg_add_unless, 32, \
         "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
-atomic_op _or, 32, "or r24, r22, r2"
-atomic_op _and, 32, "and r24, r22, r2"
-atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
-atomic_op _xor, 32, "xor r24, r22, r2"
+atomic_op _fetch_or, 32, "or r24, r22, r2"
+atomic_op _fetch_and, 32, "and r24, r22, r2"
+atomic_op _fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
+atomic_op _fetch_xor, 32, "xor r24, r22, r2"
 
 atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \
         { bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }"
@@ -192,9 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
         { bbns r26, 3f; add r24, r22, r4 }; \
         { bbns r27, 3f; add r25, r23, r5 }; \
         slt_u r26, r24, r22; add r25, r25, r26"
-atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
-atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
-atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
+atomic_op 64_fetch_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_fetch_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_fetch_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 jrp lr /* happy backtracer */