Merge branch 'locking-rwsem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull support for killable rwsems from Ingo Molnar:
"This, by Michal Hocko, implements down_write_killable().
The main use case will be to update mm_sem usage sites to use this new
API, to allow the mm-reaper introduced in commit aac4536355 ("mm,
oom: introduce oom reaper") to tear down oom victim address spaces
asynchronously with minimum latencies and without deadlock worries"
[ The vfs will want it too as the inode lock is changed from a mutex to
an rwsem due to the parallel lookup and readdir updates ]
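
[ A minimal sketch of how a caller is expected to use the new API,
  assuming the mmap_sem conversion described above lands; this is an
  illustration, not code from this series. down_write_killable()
  returns 0 on success and -EINTR if a fatal signal arrives while the
  task sleeps waiting for the lock:

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;	/* killed while waiting for the lock */
	/* ... modify the address space ... */
	up_write(&mm->mmap_sem);
]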
* 'locking-rwsem-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/rwsem: Fix comment on register clobbering
locking/rwsem: Fix down_write_killable()
locking/rwsem, x86: Add frame annotation for call_rwsem_down_write_failed_killable()
locking/rwsem: Provide down_write_killable()
locking/rwsem, x86: Provide __down_write_killable()
locking/rwsem, s390: Provide __down_write_killable()
locking/rwsem, ia64: Provide __down_write_killable()
locking/rwsem, alpha: Provide __down_write_killable()
locking/rwsem: Introduce basis for down_write_killable()
locking/rwsem, sparc: Drop superfluous arch specific implementation
locking/rwsem, sh: Drop superfluous arch specific implementation
locking/rwsem, xtensa: Drop superfluous arch specific implementation
locking/rwsem: Drop explicit memory barriers
locking/rwsem: Get rid of __down_write_nested()
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += mutex.h
 generic-y += preempt.h
+generic-y += rwsem.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += types.h
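
[ A generic-y entry tells Kbuild to generate a one-line wrapper header
  for the architecture, so the arch now picks up the generic rwsem
  implementation instead of carrying its own; the generated header is
  effectively just:

	#include <asm-generic/rwsem.h>
]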
@@ -1,124 +0,0 @@
-/*
- * rwsem.h: R/W semaphores implemented using CAS
- *
- * Written by David S. Miller (davem@redhat.com), 2001.
- * Derived from asm-i386/rwsem.h
- */
-#ifndef _SPARC64_RWSEM_H
-#define _SPARC64_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000L
-#define RWSEM_ACTIVE_BIAS		0x00000001L
-#define RWSEM_ACTIVE_MASK		0xffffffffL
-#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	while ((tmp = sem->count) >= 0L) {
-		if (tmp == cmpxchg(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
-	long tmp;
-
-	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-				  (atomic64_t *)(&sem->count));
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		rwsem_down_write_failed(sem);
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	__down_write_nested(sem, 0);
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
-	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-					 (atomic64_t *)(&sem->count)) < 0L))
-		rwsem_wake(sem);
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-	atomic64_add(delta, (atomic64_t *)(&sem->count));
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
-	if (tmp < 0L)
-		rwsem_downgrade_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _SPARC64_RWSEM_H */
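
[ For contrast with the CAS-based sparc code removed above, a
  simplified sketch of the killable write-lock fastpath the series adds
  to the generic header (details and exact signatures may differ from
  the merged code): on contention the slowpath can now bail out with an
  error instead of blocking uninterruptibly, which is what makes a
  single generic implementation sufficient for these architectures:

	static inline int __down_write_killable(struct rw_semaphore *sem)
	{
		long tmp;

		tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
					     (atomic_long_t *)&sem->count);
		if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
			if (IS_ERR(rwsem_down_write_failed_killable(sem)))
				return -EINTR;	/* fatal signal while waiting */
		return 0;
	}
]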