Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The tree got pretty big in this development cycle, but the net effect
  is pretty good:

    115 files changed, 673 insertions(+), 1522 deletions(-)

  The main changes were:

   - Rework and generalize the mutex code to remove per arch mutex
     primitives. (Peter Zijlstra)

   - Add vCPU preemption support: add an interface to query the
     preemption status of vCPUs and use it in locking primitives - this
     optimizes paravirt performance. (Pan Xinhui, Juergen Gross,
     Christian Borntraeger)

   - Introduce cpu_relax_yield() and remove cpu_relax_lowlatency() to
     clean up and improve the s390 lock yielding machinery and its core
     kernel impact. (Christian Borntraeger)

   - Micro-optimize mutexes some more. (Waiman Long)

   - Reluctantly add the to-be-deprecated mutex_trylock_recursive()
     interface on a temporary basis, to give the DRM code more time to
     get rid of its locking hacks. Any other users will be NAK-ed on
     sight. (We turned off the deprecation warning for the time being
     to not pollute the build log.) (Peter Zijlstra)

   - Improve the rtmutex code a bit, in light of recent long lived
     bugs/races. (Thomas Gleixner)

   - Misc fixes, cleanups"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  x86/paravirt: Fix bool return type for PVOP_CALL()
  x86/paravirt: Fix native_patch()
  locking/ww_mutex: Use relaxed atomics
  locking/rtmutex: Explain locking rules for rt_mutex_proxy_unlock()/init_proxy_locked()
  locking/rtmutex: Get rid of RT_MUTEX_OWNER_MASKALL
  x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()
  locking/mutex: Break out of expensive busy-loop on {mutex,rwsem}_spin_on_owner() when owner vCPU is preempted
  locking/osq: Break out of spin-wait busy waiting loop for a preempted vCPU in osq_lock()
  Documentation/virtual/kvm: Support the vCPU preemption check
  x86/xen: Support the vCPU preemption check
  x86/kvm: Support the vCPU preemption check
  x86/kvm: Support the vCPU preemption check
  kvm: Introduce kvm_write_guest_offset_cached()
  locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests
  locking/spinlocks, s390: Implement vcpu_is_preempted(cpu)
  locking/core, powerpc: Implement vcpu_is_preempted(cpu)
  sched/core: Introduce the vcpu_is_preempted(cpu) interface
  sched/wake_q: Rename WAKE_Q to DEFINE_WAKE_Q
  locking/core: Provide common cpu_relax_yield() definition
  locking/mutex: Don't mark mutex_trylock_recursive() as deprecated, temporarily
  ...
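The centerpiece of the vCPU preemption work is a simple query, vcpu_is_preempted(cpu), that busy-wait loops can use to stop spinning on a lock holder whose vCPU has been scheduled out by the hypervisor. The sketch below is a self-contained userspace illustration of that spin-on-owner idea, not kernel code: mock_lock, mock_vcpu_is_preempted() and mock_spin_on_owner() are invented stand-ins for the in-kernel vcpu_is_preempted()/{mutex,rwsem}_spin_on_owner() machinery touched by this merge.

/*
 * Userspace sketch of "stop spinning when the lock holder's vCPU is
 * preempted".  Every name here (mock_lock, mock_vcpu_is_preempted,
 * mock_spin_on_owner) is invented for illustration; only the idea
 * mirrors the kernel changes in this merge.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_lock {
	atomic_int owner_cpu;		/* -1 == unlocked, else holder's CPU */
};

/* Stand-in for vcpu_is_preempted(); a real guest would ask the hypervisor. */
static bool mock_vcpu_is_preempted(int cpu)
{
	(void)cpu;
	return false;			/* pretend the holder keeps running */
}

/* Spin only while the holder can make progress; otherwise yield. */
static void mock_spin_on_owner(struct mock_lock *lock)
{
	int owner;

	while ((owner = atomic_load(&lock->owner_cpu)) != -1) {
		if (mock_vcpu_is_preempted(owner)) {
			/* Holder cannot run: spinning is wasted work. */
			sched_yield();
			continue;
		}
		/* Rough cpu_relax() stand-in: a compiler barrier. */
		atomic_signal_fence(memory_order_seq_cst);
	}
}

int main(void)
{
	struct mock_lock lock;

	atomic_init(&lock.owner_cpu, -1);	/* lock starts out free */
	mock_spin_on_owner(&lock);		/* returns immediately */
	puts("lock observed free");
	return 0;
}

The s390 hunks below follow the same shape: the arch-specific arch_vcpu_is_preempted() replaces the old open-coded cpu_is_preempted()/smp_vcpu_scheduled() pair, and the spin/rwlock slow paths consult it before deciding whether to keep spinning or to yield to the lock owner.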
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
@@ -234,9 +234,10 @@ static inline unsigned short stap(void)
 /*
  * Give up the time slice of the virtual PU.
  */
-void cpu_relax(void);
+#define cpu_relax_yield cpu_relax_yield
+void cpu_relax_yield(void);
 
-#define cpu_relax_lowlatency() barrier()
+#define cpu_relax() barrier()
 
 #define ECAG_CACHE_ATTRIBUTE 0
 #define ECAG_CPU_ATTRIBUTE 1
@@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 	return __sync_bool_compare_and_swap(lock, old, new);
 }
 
+#ifndef CONFIG_SMP
+static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
+#else
+bool arch_vcpu_is_preempted(int cpu);
+#endif
+
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
@@ -53,7 +53,7 @@ void s390_update_cpu_mhz(void)
 	on_each_cpu(update_cpu_mhz, NULL, 0);
 }
 
-void notrace cpu_relax(void)
+void notrace cpu_relax_yield(void)
 {
 	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
 		diag_stat_inc(DIAG_STAT_X044);
@@ -61,7 +61,7 @@ void notrace cpu_relax(void)
 	}
 	barrier();
 }
-EXPORT_SYMBOL(cpu_relax);
+EXPORT_SYMBOL(cpu_relax_yield);
 
 /*
  * cpu_init - initializes state that is per-CPU.
@@ -368,10 +368,15 @@ int smp_find_processor_id(u16 address)
 	return -1;
 }
 
-int smp_vcpu_scheduled(int cpu)
+bool arch_vcpu_is_preempted(int cpu)
 {
-	return pcpu_running(pcpu_devices + cpu);
+	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+		return false;
+	if (pcpu_running(pcpu_devices + cpu))
+		return false;
+	return true;
 }
+EXPORT_SYMBOL(arch_vcpu_is_preempted);
 
 void smp_yield_cpu(int cpu)
 {
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
 	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
 
-static inline int cpu_is_preempted(int cpu)
-{
-	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
-		return 0;
-	if (smp_vcpu_scheduled(cpu))
-		return 0;
-	return 1;
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 			continue;
 		}
 		/* First iteration: check if the lock owner is running. */
-		if (first_diag && cpu_is_preempted(~owner)) {
+		if (first_diag && arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			continue;
 		}
 		/* Check if the lock owner is running. */
-		if (first_diag && cpu_is_preempted(~owner)) {
+		if (first_diag && arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
 {
 	if (!cpu)
 		return;
-	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
 		return;
 	smp_yield_cpu(~cpu);
 }
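The `#define cpu_relax_yield cpu_relax_yield` line in the processor.h hunk above uses the usual kernel pattern for letting an architecture override a generic helper at compile time: the arch header defines the macro to its own name, and the common header only supplies a fallback when the macro is absent. A compile-only sketch of that pattern, with invented my_* names rather than the real kernel headers, might look like this:

/* Sketch of the override-or-fallback pattern; my_* names are made up. */
#include <stdio.h>

/* --- stand-in for an arch header (s390-style opt-in) ----------------- */
#define my_cpu_relax_yield my_cpu_relax_yield	/* "I have my own version" */
static void my_cpu_relax_yield(void)
{
	puts("arch-specific yield (s390 would issue diag 0x44 here)");
}

/* --- stand-in for the common header ---------------------------------- */
static void my_cpu_relax(void)
{
	__asm__ __volatile__("" ::: "memory");	/* roughly barrier() */
}

#ifndef my_cpu_relax_yield
#define my_cpu_relax_yield my_cpu_relax		/* generic fallback */
#endif

int main(void)
{
	my_cpu_relax();
	my_cpu_relax_yield();	/* resolves to the arch version above */
	return 0;
}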