
With commit 247f2f6f3c ("sched/core: Don't schedule threads on
pre-empted vCPUs"), the scheduler avoids preempted vCPUs to schedule
tasks on wakeup. This leads to a wrong choice of CPU, which in turn
leads to larger wakeup latencies. Eventually, it leads to a performance
regression in latency-sensitive benchmarks like soltp, schbench etc.

On Powerpc, vcpu_is_preempted() only looks at yield_count. If the
yield_count is odd, the vCPU is assumed to be preempted. However,
yield_count is increased whenever the LPAR enters CEDE state (idle).
So any CPU that has entered CEDE state is assumed to be preempted.

Even if the vCPU of a dedicated LPAR is preempted/donated, it should
have the right of first use, since the LPAR is supposed to own the vCPU.

On a Power9 System with 32 cores:

  # lscpu
  Architecture:        ppc64le
  Byte Order:          Little Endian
  CPU(s):              128
  On-line CPU(s) list: 0-127
  Thread(s) per core:  8
  Core(s) per socket:  1
  Socket(s):           16
  NUMA node(s):        2
  Model:               2.2 (pvr 004e 0202)
  Model name:          POWER9 (architected), altivec supported
  Hypervisor vendor:   pHyp
  Virtualization type: para
  L1d cache:           32K
  L1i cache:           32K
  L2 cache:            512K
  L3 cache:            10240K
  NUMA node0 CPU(s):   0-63
  NUMA node1 CPU(s):   64-127

  # perf stat -a -r 5 ./schbench

  v5.4                                v5.4 + patch
  Latency percentiles (usec)          Latency percentiles (usec)
        50.0000th: 45                       50.0th: 45
        75.0000th: 62                       75.0th: 63
        90.0000th: 71                       90.0th: 74
        95.0000th: 77                       95.0th: 78
        *99.0000th: 91                      *99.0th: 82
        99.5000th: 707                      99.5th: 83
        99.9000th: 6920                     99.9th: 86
        min=0, max=10048                    min=0, max=96

  Latency percentiles (usec)          Latency percentiles (usec)
        50.0000th: 45                       50.0th: 46
        75.0000th: 61                       75.0th: 64
        90.0000th: 72                       90.0th: 75
        95.0000th: 79                       95.0th: 79
        *99.0000th: 691                     *99.0th: 83
        99.5000th: 3972                     99.5th: 85
        99.9000th: 8368                     99.9th: 91
        min=0, max=16606                    min=0, max=117

  Latency percentiles (usec)          Latency percentiles (usec)
        50.0000th: 45                       50.0th: 46
        75.0000th: 61                       75.0th: 64
        90.0000th: 71                       90.0th: 75
        95.0000th: 77                       95.0th: 79
        *99.0000th: 106                     *99.0th: 83
        99.5000th: 2364                     99.5th: 84
        99.9000th: 7480                     99.9th: 90
        min=0, max=10001                    min=0, max=95

  Latency percentiles (usec)          Latency percentiles (usec)
        50.0000th: 45                       50.0th: 47
        75.0000th: 62                       75.0th: 65
        90.0000th: 72                       90.0th: 75
        95.0000th: 78                       95.0th: 79
        *99.0000th: 93                      *99.0th: 84
        99.5000th: 108                      99.5th: 85
        99.9000th: 6792                     99.9th: 90
        min=0, max=17681                    min=0, max=117

  Latency percentiles (usec)          Latency percentiles (usec)
        50.0000th: 46                       50.0th: 45
        75.0000th: 62                       75.0th: 64
        90.0000th: 73                       90.0th: 75
        95.0000th: 79                       95.0th: 79
        *99.0000th: 113                     *99.0th: 82
        99.5000th: 2724                     99.5th: 83
        99.9000th: 6184                     99.9th: 93
        min=0, max=9887                     min=0, max=111

  Performance counter stats for 'system wide' (5 runs):

                          v5.4                      v5.4 + patch
  context-switches      43,373  ( +- 0.40% )      44,597  ( +- 0.55% )
  cpu-migrations         1,211  ( +- 5.04% )         220  ( +- 6.23% )
  page-faults           15,983  ( +- 5.21% )      15,360  ( +- 3.38% )

Waiman Long suggested using static_keys.

Fixes: 247f2f6f3c ("sched/core: Don't schedule threads on pre-empted vCPUs")
Cc: stable@vger.kernel.org # v4.18+
Reported-by: Parth Shah <parth@linux.ibm.com>
Reported-by: Ihor Pasichnyk <Ihor.Pasichnyk@ibm.com>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Waiman Long <longman@redhat.com>
Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Vaidyanathan Srinivasan <svaidy@linux.ibm.com>
Tested-by: Parth Shah <parth@linux.ibm.com>
[mpe: Move the key and setting of the key to pseries/setup.c]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191213035036.6913-1-mpe@ellerman.id.au
arch/powerpc/include/asm/spinlock.h (323 lines, 7.2 KiB, C):
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

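/*
 * An odd yield_count means the vCPU is currently not dispatched by the
 * hypervisor -- either genuinely preempted or merely ceded while idle.
 * Dedicated-processor LPARs own their vCPUs and should never be reported
 * as preempted, so the check below is gated on the shared_processor
 * static key, which pseries/setup.c enables only in shared processor mode.
 */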
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor. Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held. Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
#endif

static inline bool is_shared_processor(void)
{
/*
 * LPPACA is only available on Pseries so guard anything LPPACA related to
 * allow other platforms (which include this common header) to compile.
 */
#ifdef CONFIG_PPC_PSERIES
	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
		lppaca_shared_proc(local_paca->lppaca_ptr));
#else
	return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
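
For context, the wakeup-path check referred to by the Fixes: tag above is
the scheduler's available_idle_cpu() helper in kernel/sched/core.c. In the
affected kernels it looks roughly like the simplified sketch below, which
is why an over-eager vcpu_is_preempted() steers wakeups away from CPUs
that are in fact idle and usable:

/* kernel/sched/core.c (simplified sketch, not the verbatim function) */
int available_idle_cpu(int cpu)
{
	/* The CPU must be idle... */
	if (!idle_cpu(cpu))
		return 0;

	/* ...and its vCPU must not be reported as preempted. */
	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}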