Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more usecases
     and makes the existing ones less buggy, especially in rarer
     architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
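For readers not following the series: the conversion replaces the operation-specific barrier names (smp_mb__before/after_atomic_dec(), smp_mb__before/after_atomic_inc(), smp_mb__before/after_clear_bit()) with one generic pair, smp_mb__before_atomic() and smp_mb__after_atomic(), usable around any atomic RMW operation or bitop. A minimal before/after sketch in kernel-style C (illustrative only, not code from the series; drop_ref_*() are hypothetical callers):

	#include <linux/atomic.h>

	/* Old interface: one barrier name per operation.
	 * (Only compiles before this series removes the macro.) */
	static void drop_ref_old(atomic_t *refs)
	{
		smp_mb__before_atomic_dec();	/* per-op barrier, now gone */
		atomic_dec(refs);
	}

	/* New interface: a single generic pair for all atomic RMW ops. */
	static void drop_ref_new(atomic_t *refs)
	{
		smp_mb__before_atomic();	/* order earlier stores before the dec */
		atomic_dec(refs);
	}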
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 
 #define ATOMIC_INIT(i)		{ (i) }
@@ -208,10 +209,4 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif /* _ASM_IA64_ATOMIC_H */
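With the per-operation macros removed from atomic.h, ia64 callers get the generic pair via the newly included <asm/barrier.h>. A hedged usage sketch (put_ref64() is a hypothetical caller, not kernel API):

	#include <linux/types.h>
	#include <linux/atomic.h>

	/* Drop a 64-bit refcount, making earlier stores visible first.
	 * On ia64 the barrier is compiler-only, because the atomic
	 * operation itself already serializes (see barrier.h below). */
	static bool put_ref64(atomic64_t *refs)
	{
		smp_mb__before_atomic();
		return atomic64_dec_and_test(refs);
	}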
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -55,6 +55,9 @@
 
 #endif
 
+#define smp_mb__before_atomic()	barrier()
+#define smp_mb__after_atomic()	barrier()
+
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
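The new macros expand to barrier() because ia64's atomic RMW instructions already order memory accesses; only the compiler must be told not to reorder. barrier() is the usual GCC memory clobber, roughly (sketch of the generic kernel definition):

	/* Compiler-only barrier: emits no instruction, but GCC may not
	 * cache memory values across it or reorder accesses around it. */
	#define barrier() __asm__ __volatile__("" : : : "memory")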
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -16,6 +16,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -65,12 +66,6 @@ __set_bit (int nr, volatile void *addr)
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
 
-/*
- * clear_bit() has "acquire" semantics.
- */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -78,7 +73,7 @@ __set_bit (int nr, volatile void *addr)
  *
  * clear_bit() is atomic and may not be reordered. However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
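As the updated kerneldoc says, clear_bit() itself is not a barrier, so unlock-style uses pair it with the generic macros. A small sketch (the flag bit and flags word are hypothetical; bitops.h now pulls in the barrier definitions via the include added above):

	#include <linux/bitops.h>

	#define MY_LOCK_BIT	0	/* hypothetical flag bit */

	static void unlock_flags(unsigned long *flags)
	{
		/* publish all stores made while "holding" the bit ... */
		smp_mb__before_atomic();
		/* ... before other CPUs can observe the bit cleared */
		clear_bit(MY_LOCK_BIT, flags);
	}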
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -118,6 +118,15 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
 #define cmpxchg_rel(ptr, o, n)	\
 	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
 
+/*
+ * Worse still - early processor implementations actually just ignored
+ * the acquire/release and did a full fence all the time. Unfortunately
+ * this meant a lot of badly written code that used .acq when they really
+ * wanted .rel became legacy out in the wild - so when we made a cpu
+ * that strictly did the .acq or .rel ... all that code started breaking - so
+ * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
+ */
+
 /* for compatibility with other platforms: */
 #define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
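The acquire/release distinction the new comment describes shows up in how the two variants would be used around a critical section. A toy sketch built on the ia64-specific macros above (toy_lock and its helpers are illustrative only; real code should use the generic kernel locking primitives):

	#include <asm/cmpxchg.h>
	#include <asm/processor.h>	/* cpu_relax() */

	struct toy_lock { unsigned int held; };

	static void toy_lock_acquire(struct toy_lock *l)
	{
		/* .acq semantics: later accesses cannot float up past it */
		while (cmpxchg_acq(&l->held, 0, 1) != 0)
			cpu_relax();
	}

	static void toy_lock_release(struct toy_lock *l)
	{
		/* .rel semantics: earlier accesses cannot sink below it -
		 * though, per the comment above, legacy parts fence fully */
		(void)cmpxchg_rel(&l->held, 1, 0);
	}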