Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking changes from Ingo Molnar:

 - futex performance increases: larger hashes, smarter wakeups

 - mutex debugging improvements

 - lots of SMP ordering documentation updates

 - introduce the smp_load_acquire(), smp_store_release() primitives.
   (There are WIP patches that make use of them - not yet merged)

 - lockdep micro-optimizations

 - lockdep improvement: better cover IRQ contexts

 - liblockdep at last. We'll continue to monitor how useful this is

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
  futexes: Fix futex_hashsize initialization
  arch: Re-sort some Kbuild files to hopefully help avoid some conflicts
  futexes: Avoid taking the hb->lock if there's nothing to wake up
  futexes: Document multiprocessor ordering guarantees
  futexes: Increase hash table size for better performance
  futexes: Clean up various details
  arch: Introduce smp_load_acquire(), smp_store_release()
  arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h
  arch: Move smp_mb__{before,after}_atomic_{inc,dec}.h into asm/atomic.h
  locking/doc: Rename LOCK/UNLOCK to ACQUIRE/RELEASE
  mutexes: Give more informative mutex warning in the !lock->owner case
  powerpc: Full barrier for smp_mb__after_unlock_lock()
  rcu: Apply smp_mb__after_unlock_lock() to preserve grace periods
  Documentation/memory-barriers.txt: Downgrade UNLOCK+BLOCK
  locking: Add an smp_mb__after_unlock_lock() for UNLOCK+BLOCK barrier
  Documentation/memory-barriers.txt: Document ACCESS_ONCE()
  Documentation/memory-barriers.txt: Prohibit speculative writes
  Documentation/memory-barriers.txt: Add long atomic examples to memory-barriers.txt
  Documentation/memory-barriers.txt: Add needed ACCESS_ONCE() calls to memory-barriers.txt
  Revert "smp/cpumask: Make CONFIG_CPUMASK_OFFSTACK=y usable without debug dependency"
  ...
@@ -1,15 +1,7 @@
 #ifndef __SPARC_BARRIER_H
 #define __SPARC_BARRIER_H
 
-/* XXX Change this if we ever use a PSO mode kernel. */
-#define mb()	__asm__ __volatile__ ("" : : : "memory")
-#define rmb()	mb()
-#define wmb()	mb()
-#define read_barrier_depends()	do { } while(0)
-#define set_mb(__var, __value)	do { __var = __value; mb(); } while(0)
-#define smp_mb()	__asm__ __volatile__("":::"memory")
-#define smp_rmb()	__asm__ __volatile__("":::"memory")
-#define smp_wmb()	__asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends()	do { } while(0)
+#include <asm/processor.h> /* for nop() */
+#include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC_BARRIER_H) */
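The sparc32 header above can shrink to two #include lines because asm-generic/barrier.h (reworked by the "arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h" commit in this merge) supplies defaults for anything an architecture does not define itself. The following is only a rough sketch of that fallback pattern, not the verbatim header; it assumes the barrier(), ACCESS_ONCE() and compiletime_assert_atomic_type() helpers from linux/compiler.h:

/* Sketch of the asm-generic/barrier.h fallback pattern: every macro is
 * wrapped in #ifndef, so an architecture only overrides what it needs.
 * The generic acquire/release primitives fall back to a full smp_mb().
 */
#ifndef mb
#define mb()	barrier()
#endif

#ifndef smp_mb
#define smp_mb()	mb()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	ACCESS_ONCE(*p) = (v);					\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	smp_mb();						\
	___p1;							\
})
#endif

An architecture that can do better, as sparc64 does below, simply defines the macro before including the generic header.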
@@ -53,4 +53,19 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 
 #define smp_read_barrier_depends()	do { } while(0)
 
+#define smp_store_release(p, v)					\
+do {								\
+	compiletime_assert_atomic_type(*p);			\
+	barrier();						\
+	ACCESS_ONCE(*p) = (v);					\
+} while (0)
+
+#define smp_load_acquire(p)					\
+({								\
+	typeof(*p) ___p1 = ACCESS_ONCE(*p);			\
+	compiletime_assert_atomic_type(*p);			\
+	barrier();						\
+	___p1;							\
+})
+
 #endif /* !(__SPARC64_BARRIER_H) */
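On sparc64 the kernel runs in TSO mode, where only StoreLoad reordering can occur, so a compiler-only barrier() is enough to give these macros acquire/release semantics; compiletime_assert_atomic_type(*p) rejects operands that cannot be loaded or stored in a single access. A minimal, illustrative pairing of the two new primitives follows; the publish()/try_read() functions and the payload/ready variables are made up for this sketch and are not part of the merge:

#include <asm/barrier.h>	/* smp_store_release(), smp_load_acquire() */

static int payload;
static int ready;

/* Publisher: the release store orders the write to 'payload' before
 * the write that sets the 'ready' flag. */
static void publish(void)
{
	payload = 42;
	smp_store_release(&ready, 1);
}

/* Reader: the acquire load of 'ready' orders the later read of
 * 'payload', so a reader that observes ready == 1 also sees 42. */
static int try_read(void)
{
	if (smp_load_acquire(&ready))
		return payload;
	return -1;		/* not published yet */
}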