Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 - improve rwsem scalability

 - add uninitialized rwsem debugging check

 - reduce lockdep's stacktrace memory usage and add diagnostics

 - misc cleanups, code consolidation and constification

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mutex: Fix up mutex_waiter usage
  locking/mutex: Use mutex flags macro instead of hard code
  locking/mutex: Make __mutex_owner static to mutex.c
  locking/qspinlock,x86: Clarify virt_spin_lock_key
  locking/rwsem: Check for operations on an uninitialized rwsem
  locking/rwsem: Make handoff writer optimistically spin on owner
  locking/lockdep: Report more stack trace statistics
  locking/lockdep: Reduce space occupied by stack traces
  stacktrace: Constify 'entries' arguments
  locking/lockdep: Make it clear that what lock_class::key points at is not modified
@@ -63,10 +63,25 @@ static inline bool vcpu_is_preempted(long cpu)
 #endif
 
 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_spin_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void native_pv_lock_init(void) __init;
 
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
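The "backwards fashion" in the first comment above is the one-way static-key flip: the key is defined true, and only bare-metal boot ever changes it, by disabling it once; a hypervisor guest never needs to enable it, so no ordering is required between paravirt init and hypervisor setup. A minimal sketch of that flip, modeled on native_pv_lock_init() as recalled from kernel sources of this era (its body lives in arch/x86/kernel/paravirt.c and is not part of this hunk, so treat the details as an approximation):

	/* Sketch, not part of this diff: the one-way key disable on bare metal. */
	#include <linux/jump_label.h>	/* DEFINE_STATIC_KEY_TRUE(), static_branch_disable() */
	#include <asm/cpufeature.h>	/* boot_cpu_has(), X86_FEATURE_HYPERVISOR */
	#include <asm/qspinlock.h>	/* DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key) */

	DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

	void __init native_pv_lock_init(void)
	{
		/*
		 * The key defaults to true (virt behaviour). Bare metal
		 * disables it exactly once; a guest never re-enables it,
		 * so this call and HV setup need no mutual ordering.
		 */
		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
			static_branch_disable(&virt_spin_lock_key);
	}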
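The hunk is cut off at the opening brace of virt_spin_lock(). To make the "Returns:" contract above concrete, here is the body as recalled from kernel sources of the same era — a sketch, not part of this diff: with the key disabled (native), it returns false and queued_spin_lock_slowpath() queues as usual; with the key enabled (virt), it takes the lock with a simple test-and-set loop and returns true.

	static inline bool virt_spin_lock(struct qspinlock *lock)
	{
		if (!static_branch_likely(&virt_spin_lock_key))
			return false;	/* native: let queued_spin_lock_slowpath() run */

		/*
		 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
		 * back to a test-and-set spinlock, because fair (queued)
		 * locks have horrible lock-holder preemption issues.
		 */
		do {
			while (atomic_read(&lock->val) != 0)
				cpu_relax();
		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

		return true;	/* lock taken here; caller skips the slowpath */
	}

The unfair test-and-set loop trades fairness for robustness when vCPUs holding or queued on the lock can be scheduled out, which is exactly the trade the "hijack" in the comment refers to.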