Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Another attempt at enabling cross-release lockdep dependency
     tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
     with better performance and fewer false positives. (Byungchul Park)

   - Introduce lockdep_assert_irqs_enabled()/disabled() and convert
     open-coded equivalents to lockdep variants. (Frederic Weisbecker)

   - Add down_read_killable() and use it in the VFS's iterate_dir()
     method. (Kirill Tkhai)

   - Convert remaining uses of ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE().
     Most of the conversion was Coccinelle driven. (Mark Rutland,
     Paul E. McKenney)

   - Get rid of lockless_dereference(), by strengthening Alpha atomics,
     strengthening READ_ONCE() with smp_read_barrier_depends() and thus
     being able to convert users of lockless_dereference() to
     READ_ONCE(). (Will Deacon)

   - Various micro-optimizations:

        - better PV qspinlocks (Waiman Long)
        - better x86 barriers (Michael S. Tsirkin)
        - better x86 refcounts (Kees Cook)

   - ... plus other fixes and enhancements. (Borislav Petkov, Juergen
     Gross, Miguel Bernal Marin)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
  rcu: Use lockdep to assert IRQs are disabled/enabled
  netpoll: Use lockdep to assert IRQs are disabled/enabled
  timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
  sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
  irq_work: Use lockdep to assert IRQs are disabled/enabled
  irq/timings: Use lockdep to assert IRQs are disabled/enabled
  perf/core: Use lockdep to assert IRQs are disabled/enabled
  x86: Use lockdep to assert IRQs are disabled/enabled
  smp/core: Use lockdep to assert IRQs are disabled/enabled
  timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
  timers/nohz: Use lockdep to assert IRQs are disabled/enabled
  workqueue: Use lockdep to assert IRQs are disabled/enabled
  irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
  locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
  locking/pvqspinlock: Implement hybrid PV queued/unfair locks
  locking/rwlocks: Fix comments
  x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
  block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
  workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
  ...
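The lockdep_assert_irqs_enabled()/disabled() conversions listed above replace open-coded IRQ-state checks. A minimal sketch of the pattern, assuming a made-up helper my_update_stats() rather than any function in this merge:

#include <linux/lockdep.h>
#include <linux/irqflags.h>

/* Hypothetical caller, for illustration only; not taken from this merge. */
static unsigned long my_event_count;

static void my_update_stats(void)
{
	/*
	 * Previously open-coded as: WARN_ON_ONCE(!irqs_disabled());
	 * The lockdep variant compiles away when lockdep IRQ tracking is
	 * disabled and also checks lockdep's own view of the IRQ state.
	 */
	lockdep_assert_irqs_disabled();

	my_event_count++;	/* caller is expected to have IRQs off */
}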
@@ -38,15 +38,31 @@
 /*
  * lock for reading
  */
-static inline void
-__down_read (struct rw_semaphore *sem)
+static inline int
+___down_read (struct rw_semaphore *sem)
 {
 	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
 
-	if (result < 0)
-		rwsem_down_read_failed(sem);
+	return (result < 0);
+}
+
+static inline void
+__down_read (struct rw_semaphore *sem)
+{
+	if (___down_read(sem))
+		rwsem_down_read_failed(sem);
+}
+
+static inline int
+__down_read_killable (struct rw_semaphore *sem)
+{
+	if (___down_read(sem))
+		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
 }
 
 /*
  * lock for writing
  */
@@ -73,9 +89,10 @@ __down_write (struct rw_semaphore *sem)
 static inline int
 __down_write_killable (struct rw_semaphore *sem)
 {
-	if (___down_write(sem))
+	if (___down_write(sem)) {
 		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
 			return -EINTR;
+	}
 
 	return 0;
 }
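The rwsem.h hunks above give ia64 a killable read-lock slow path: the old __down_read() fast path becomes ___down_read(), shared by __down_read() and the new __down_read_killable(). As a rough usage sketch (my_read_op() is a made-up caller; the error handling mirrors what iterate_dir() adopts in this series):

#include <linux/rwsem.h>
#include <linux/errno.h>

/* Hypothetical caller, for illustration only. */
static int my_read_op(struct rw_semaphore *sem)
{
	if (down_read_killable(sem))	/* fails with -EINTR on a fatal signal */
		return -EINTR;

	/* ... read-side critical section ... */

	up_read(sem);
	return 0;
}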
@@ -62,7 +62,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->lock);
+	int tmp = READ_ONCE(lock->lock);
 
 	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
 		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@ -74,19 +74,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
 
 	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+	WRITE_ONCE(*p, (tmp + 2) & ~1);
 }
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock);
+	long tmp = READ_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock);
+	long tmp = READ_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
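The ACCESS_ONCE() changes in the spinlock hunks are part of the tree-wide conversion described in the pull request text. A minimal sketch of the general rule, using a made-up variable rather than ia64 code: loads become READ_ONCE() and stores become WRITE_ONCE():

#include <linux/compiler.h>

static int my_shared_flag;	/* made-up variable for illustration */

static int my_poll_flag(void)
{
	int val = READ_ONCE(my_shared_flag);	/* was: val = ACCESS_ONCE(my_shared_flag); */

	WRITE_ONCE(my_shared_flag, 0);		/* was: ACCESS_ONCE(my_shared_flag) = 0; */
	return val;
}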
@@ -127,9 +127,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 {
 	arch_spin_lock(lock);
 }
-
-#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_spin_lock_flags	arch_spin_lock_flags
 
 #ifdef ASM_SUPPORTED
 
@@ -157,6 +155,7 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 	: "p6", "p7", "r2", "memory");
 }
 
+#define arch_read_lock_flags arch_read_lock_flags
 #define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
@@ -209,6 +208,7 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 	: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
+#define arch_write_lock_flags arch_write_lock_flags
 #define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
 #define arch_write_trylock(rw)						\
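The added "#define arch_read_lock_flags arch_read_lock_flags" and "#define arch_write_lock_flags arch_write_lock_flags" lines follow the usual kernel convention of defining an optional hook to its own name so that generic code can detect an architecture override with #ifndef. A sketch of the idea, with an illustrative fallback rather than the exact generic-header text:

/*
 * Arch header: the self-referential define advertises that the
 * architecture supplies its own implementation.
 */
#define arch_read_lock_flags arch_read_lock_flags
static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
	/* ... architecture-specific locking ... */
}

/*
 * Generic header (illustrative): only install the fallback when the
 * architecture did not define the hook above.
 */
#ifndef arch_read_lock_flags
# define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#endif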
@@ -232,8 +232,6 @@ static inline void arch_write_unlock(arch_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-
 #define arch_write_lock(l)						\
 ({									\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);	\
@@ -273,8 +271,4 @@ static inline int arch_read_trylock(arch_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define arch_spin_relax(lock)	cpu_relax()
-#define arch_read_relax(lock)	cpu_relax()
-#define arch_write_relax(lock)	cpu_relax()
-
 #endif /* _ASM_IA64_SPINLOCK_H */