- /* SPDX-License-Identifier: GPL-2.0 */
- /* rwsem.h: R/W semaphores, public interface
- *
- * Written by David Howells ([email protected]).
- * Derived from asm-i386/semaphore.h
- */
- #ifndef _LINUX_RWSEM_H
- #define _LINUX_RWSEM_H
- #include <linux/linkage.h>
- #include <linux/types.h>
- #include <linux/list.h>
- #include <linux/spinlock.h>
- #include <linux/atomic.h>
- #include <linux/err.h>
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- # define __RWSEM_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_SLEEP, \
- },
- #else
- # define __RWSEM_DEP_MAP_INIT(lockname)
- #endif
- #ifndef CONFIG_PREEMPT_RT
- #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- #include <linux/osq_lock.h>
- #endif
- #include <linux/android_vendor.h>
- /*
- * For an uncontended rwsem, count and owner are the only fields a task
- * needs to touch when acquiring the rwsem. So they are put next to each
- * other to increase the chance that they will share the same cacheline.
- *
- * In a contended rwsem, the owner is likely the most frequently accessed
- * field in the structure as the optimistic waiter that holds the osq lock
- * will spin on owner. For an embedded rwsem, other hot fields in the
- * containing structure should be moved further away from the rwsem to
- * reduce the chance that they will share the same cacheline and cause
- * cacheline bouncing problems (an embedding sketch follows the structure
- * definition below).
- */
- struct rw_semaphore {
- atomic_long_t count;
- /*
- * Write owner or one of the read owners as well as flags regarding
- * the current state of the rwsem. Can be used as a speculative
- * check to see if the write owner is running on the cpu.
- */
- atomic_long_t owner;
- #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- struct optimistic_spin_queue osq; /* spinner MCS lock */
- #endif
- raw_spinlock_t wait_lock;
- struct list_head wait_list;
- #ifdef CONFIG_DEBUG_RWSEMS
- void *magic;
- #endif
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
- #endif
- ANDROID_VENDOR_DATA(1);
- ANDROID_OEM_DATA_ARRAY(1, 2);
- };
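- /*
- * Illustrative sketch, not part of the original header; all names below are
- * hypothetical. It shows one way to embed an rwsem in a containing structure
- * following the layout comment above: fields protected by the lock sit next
- * to it, while an unrelated, frequently updated field is pushed onto its own
- * cacheline (here with ____cacheline_aligned_in_smp from <linux/cache.h>) so
- * spinning waiters do not bounce it.
- *
- *	struct example_object {
- *		struct rw_semaphore lock;
- *		struct list_head items;
- *		unsigned long nr_items;
- *		atomic_long_t stats ____cacheline_aligned_in_smp;
- *	};
- */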
- /* In all implementations count != 0 means locked */
- static inline int rwsem_is_locked(struct rw_semaphore *sem)
- {
- return atomic_long_read(&sem->count) != 0;
- }
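- /*
- * Illustrative use, hypothetical name: rwsem_is_locked() is mostly suited to
- * sanity checks in code that expects the lock to already be held, e.g.
- *
- *	WARN_ON_ONCE(!rwsem_is_locked(&example_sem));
- */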
- #define RWSEM_UNLOCKED_VALUE 0L
- #define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
- /* Common initializer macros and functions */
- #ifdef CONFIG_DEBUG_RWSEMS
- # define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
- #else
- # define __RWSEM_DEBUG_INIT(lockname)
- #endif
- #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- #define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
- #else
- #define __RWSEM_OPT_INIT(lockname)
- #endif
- #define __RWSEM_INITIALIZER(name) \
- { __RWSEM_COUNT_INIT(name), \
- .owner = ATOMIC_LONG_INIT(0), \
- __RWSEM_OPT_INIT(name) \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
- __RWSEM_DEBUG_INIT(name) \
- __RWSEM_DEP_MAP_INIT(name) }
- #define DECLARE_RWSEM(name) \
- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
- extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
- struct lock_class_key *key);
- #define init_rwsem(sem) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
- } while (0)
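- /*
- * Illustrative sketch, hypothetical names; assumes <linux/slab.h> for
- * kzalloc(). DECLARE_RWSEM() defines a statically initialized rwsem, while
- * init_rwsem() initializes an rwsem embedded in dynamically allocated
- * memory; each init_rwsem() call site gets its own lockdep class key.
- *
- *	static DECLARE_RWSEM(example_global_sem);
- *
- *	struct example_object *example_object_alloc(gfp_t gfp)
- *	{
- *		struct example_object *obj = kzalloc(sizeof(*obj), gfp);
- *
- *		if (obj)
- *			init_rwsem(&obj->lock);
- *		return obj;
- *	}
- */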
- /*
- * This is the same regardless of which rwsem implementation is being used.
- * It is just a heuristic meant to be called by somebody already holding the
- * rwsem to see if somebody of an incompatible type (e.g. a writer, while the
- * caller holds the lock for read) is waiting for access to the lock.
- */
- static inline int rwsem_is_contended(struct rw_semaphore *sem)
- {
- return !list_empty(&sem->wait_list);
- }
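- /*
- * Illustrative sketch, hypothetical names: a long-running reader can poll
- * rwsem_is_contended() to notice that a waiter (typically a writer) is
- * queued behind it and briefly drop the lock so that waiter can make
- * progress.
- *
- *	down_read(&example_sem);
- *	while (example_more_work()) {
- *		example_do_one_chunk();
- *		if (rwsem_is_contended(&example_sem)) {
- *			up_read(&example_sem);
- *			cond_resched();
- *			down_read(&example_sem);
- *		}
- *	}
- *	up_read(&example_sem);
- */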
- #else /* !CONFIG_PREEMPT_RT */
- #include <linux/rwbase_rt.h>
- struct rw_semaphore {
- struct rwbase_rt rwbase;
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
- #endif
- };
- #define __RWSEM_INITIALIZER(name) \
- { \
- .rwbase = __RWBASE_INITIALIZER(name), \
- __RWSEM_DEP_MAP_INIT(name) \
- }
- #define DECLARE_RWSEM(lockname) \
- struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
- extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
- struct lock_class_key *key);
- #define init_rwsem(sem) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_rwsem((sem), #sem, &__key); \
- } while (0)
- static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
- {
- return rw_base_is_locked(&sem->rwbase);
- }
- static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
- {
- return rw_base_is_contended(&sem->rwbase);
- }
- #endif /* CONFIG_PREEMPT_RT */
- /*
- * The functions below are the same for all rwsem implementations including
- * the RT specific variant.
- */
- /*
- * lock for reading
- */
- extern void down_read(struct rw_semaphore *sem);
- extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
- extern int __must_check down_read_killable(struct rw_semaphore *sem);
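- /*
- * Illustrative sketch, hypothetical names: the interruptible/killable
- * variants return 0 on success and -EINTR when the sleep was interrupted
- * by a (fatal) signal, so the return value must be checked before the
- * caller may touch protected state or call up_read().
- *
- *	static int example_read_op(struct example_object *obj)
- *	{
- *		int ret = down_read_killable(&obj->lock);
- *
- *		if (ret)
- *			return ret;
- *		example_read_state(obj);
- *		up_read(&obj->lock);
- *		return 0;
- *	}
- */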
- /*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
- extern int down_read_trylock(struct rw_semaphore *sem);
- /*
- * lock for writing
- */
- extern void down_write(struct rw_semaphore *sem);
- extern int __must_check down_write_killable(struct rw_semaphore *sem);
- /*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
- extern int down_write_trylock(struct rw_semaphore *sem);
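- /*
- * Illustrative sketch, hypothetical names: the trylock variants do not
- * sleep and return 1 if the lock was acquired, 0 otherwise, which allows
- * an opportunistic fast path with a fallback on contention.
- *
- *	if (down_write_trylock(&obj->lock)) {
- *		example_update_state(obj);
- *		up_write(&obj->lock);
- *	} else {
- *		example_defer_update(obj);
- *	}
- */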
- /*
- * release a read lock
- */
- extern void up_read(struct rw_semaphore *sem);
- /*
- * release a write lock
- */
- extern void up_write(struct rw_semaphore *sem);
- /*
- * downgrade write lock to read lock
- */
- extern void downgrade_write(struct rw_semaphore *sem);
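- /*
- * Illustrative sketch, hypothetical names: downgrade_write() turns a held
- * write lock into a read lock without any window in which the lock is
- * dropped, e.g. to publish an update and then keep using it stably while
- * letting other readers in. The downgraded lock is released with up_read().
- *
- *	down_write(&obj->lock);
- *	example_install_new_state(obj);
- *	downgrade_write(&obj->lock);
- *	example_consume_state(obj);
- *	up_read(&obj->lock);
- */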
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
- * nested locking. NOTE: rwsems are not allowed to recurse
- * (which occurs if the same task tries to acquire the same
- * lock instance multiple times), but multiple locks of the
- * same lock class might be taken, if the order of the locks
- * is always the same. This ordering rule can be expressed
- * to lockdep via the _nested() APIs, by enumerating the
- * subclasses that are used. (If the nesting relationship is
- * static then another method for expressing nested locking is
- * the explicit definition of lock class keys and the use of
- * lockdep_set_class() at lock initialization time.
- * See Documentation/locking/lockdep-design.rst for more details.)
- */
- extern void down_read_nested(struct rw_semaphore *sem, int subclass);
- extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
- extern void down_write_nested(struct rw_semaphore *sem, int subclass);
- extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
- extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
- # define down_write_nest_lock(sem, nest_lock) \
- do { \
- typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
- _down_write_nest_lock(sem, &(nest_lock)->dep_map); \
- } while (0)
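- /*
- * Illustrative sketch, hypothetical names: when two rwsems of the same lock
- * class are always taken in the same order (for example a parent object
- * before one of its children), the inner acquisition can be annotated with
- * a subclass such as SINGLE_DEPTH_NESTING so lockdep does not report a
- * false positive deadlock.
- *
- *	down_write(&parent->lock);
- *	down_write_nested(&child->lock, SINGLE_DEPTH_NESTING);
- *	example_reparent(child, parent);
- *	up_write(&child->lock);
- *	up_write(&parent->lock);
- */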
- /*
- * Take/release a lock when the owner that took it is not the task that will release it.
- *
- * [ This API should be avoided as much as possible - the
- * proper abstraction for this case is completions. ]
- */
- extern void down_read_non_owner(struct rw_semaphore *sem);
- extern void up_read_non_owner(struct rw_semaphore *sem);
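- /*
- * Illustrative sketch, hypothetical names: the _non_owner() variants cover
- * the rare case where the task releasing the read lock is not the task
- * that acquired it, e.g. a lock taken before handing the object to a
- * worker that drops it when the work completes. As noted above, a
- * completion is usually the better abstraction.
- *
- *	down_read_non_owner(&obj->lock);
- *	queue_work(example_wq, &obj->finish_work);
- *
- * and later, in the work handler:
- *
- *	up_read_non_owner(&obj->lock);
- */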
- #else
- # define down_read_nested(sem, subclass) down_read(sem)
- # define down_read_killable_nested(sem, subclass) down_read_killable(sem)
- # define down_write_nest_lock(sem, nest_lock) down_write(sem)
- # define down_write_nested(sem, subclass) down_write(sem)
- # define down_write_killable_nested(sem, subclass) down_write_killable(sem)
- # define down_read_non_owner(sem) down_read(sem)
- # define up_read_non_owner(sem) up_read(sem)
- #endif
- #endif /* _LINUX_RWSEM_H */