locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
Implement FETCH-OP atomic primitives; these are very similar to the existing OP-RETURN primitives we already have, except they return the value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as bitops (because it becomes impossible to reconstruct the state prior to modification).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
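For illustration only (this snippet is not part of the patch): a minimal user-space sketch of the FETCH-OP vs. OP-RETURN distinction, using the GCC __atomic builtins as stand-ins for the kernel primitives. Only the FETCH-OP form lets the caller reconstruct whether the bit was already set before the operation.

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x1UL;
	unsigned long a = 0x1UL;	/* bit already set */
	unsigned long b = 0x1UL;	/* bit already set */

	/*
	 * OP-RETURN style: the result is the value *after* the OR, so it
	 * is 0x1 whether or not the bit was set beforehand -- the prior
	 * state cannot be recovered from the return value.
	 */
	unsigned long after = __atomic_or_fetch(&a, mask, __ATOMIC_SEQ_CST);

	/*
	 * FETCH-OP style: the result is the value *before* the OR, so the
	 * prior state of the bit is preserved in the return value.
	 */
	unsigned long before = __atomic_fetch_or(&b, mask, __ATOMIC_SEQ_CST);

	printf("op-return saw %#lx, fetch-op saw %#lx (bit was %s)\n",
	       after, before, (before & mask) ? "already set" : "clear");
	return 0;
}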
committed by Ingo Molnar
parent 3a1adb23a5
commit 1af5de9af1
@@ -19,9 +19,9 @@
 #include <asm/barrier.h>
 
 /* Tile-specific routines to support <asm/bitops.h>. */
-unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask);
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -35,7 +35,7 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
  */
 static inline void set_bit(unsigned nr, volatile unsigned long *addr)
 {
-	_atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
+	_atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -54,7 +54,7 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
  */
 static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 {
-	_atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
+	_atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -69,7 +69,7 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
  */
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
-	_atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
+	_atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -85,7 +85,7 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
 	unsigned long mask = BIT_MASK(nr);
 	addr += BIT_WORD(nr);
 	smp_mb(); /* barrier for proper semantics */
-	return (_atomic_or(addr, mask) & mask) != 0;
+	return (_atomic_fetch_or(addr, mask) & mask) != 0;
 }
 
 /**
@@ -101,7 +101,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
 	unsigned long mask = BIT_MASK(nr);
 	addr += BIT_WORD(nr);
 	smp_mb(); /* barrier for proper semantics */
-	return (_atomic_andn(addr, mask) & mask) != 0;
+	return (_atomic_fetch_andn(addr, mask) & mask) != 0;
 }
 
 /**
@@ -118,7 +118,7 @@ static inline int test_and_change_bit(unsigned nr,
 	unsigned long mask = BIT_MASK(nr);
 	addr += BIT_WORD(nr);
 	smp_mb(); /* barrier for proper semantics */
-	return (_atomic_xor(addr, mask) & mask) != 0;
+	return (_atomic_fetch_xor(addr, mask) & mask) != 0;
 }
 
 #include <asm-generic/bitops/ext2-atomic.h>