Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Heiko Carstens:
 "Since Martin is on vacation you get the s390 pull request for the
  v4.15 merge window this time from me.

  Besides a lot of cleanups and bug fixes these are the most important
  changes:

   - a new regset for runtime instrumentation registers

   - hardware accelerated AES-GCM support for the aes_s390 module

   - support for the new CEX6S crypto cards

   - support for FORTIFY_SOURCE

   - addition of missing z13 and new z14 instructions to the in-kernel
     disassembler

   - generate opcode tables for the in-kernel disassembler out of a
     simple text file instead of having to manually maintain those
     tables

   - fast memset16, memset32 and memset64 implementations

   - removal of named saved segment support

   - hardware counter support for z14

   - queued spinlocks and queued rwlocks implementations for s390

   - use the stack_depth tracking feature for s390 BPF JIT

   - a new s390_sthyi system call which emulates the sthyi (store
     hypervisor information) instruction

   - removal of the old KVM virtio transport

   - an s390 specific CPU alternatives implementation which is used in
     the new spinlock code"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (88 commits)
  MAINTAINERS: add virtio-ccw.h to virtio/s390 section
  s390/noexec: execute kexec datamover without DAT
  s390: fix transactional execution control register handling
  s390/bpf: take advantage of stack_depth tracking
  s390: simplify transactional execution elf hwcap handling
  s390/zcrypt: Rework struct ap_qact_ap_info.
  s390/virtio: remove unused header file kvm_virtio.h
  s390: avoid undefined behaviour
  s390/disassembler: generate opcode tables from text file
  s390/disassembler: remove insn_to_mnemonic()
  s390/dasd: avoid calling do_gettimeofday()
  s390: vfio-ccw: Do not attempt to free no-op, test and tic cda.
  s390: remove named saved segment support
  s390/archrandom: Reconsider s390 arch random implementation
  s390/pci: do not require AIS facility
  s390/qdio: sanitize put_indicator
  s390/qdio: use atomic_cmpxchg
  s390/nmi: avoid using long-displacement facility
  s390: pass endianness info to sparse
  s390/decompressor: remove informational messages
  ...
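A note on the memset16/memset32/memset64 helpers mentioned above: unlike memset(), they fill a region with a repeated 16-, 32- or 64-bit pattern. The prototypes added in this window live in include/linux/string.h; the following is a minimal userspace model of the semantics only (the helper body is illustrative, not the s390 assembler version):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Model of memset32(): fill 'count' 32-bit words with value 'v'. */
static void *memset32_model(uint32_t *s, uint32_t v, size_t count)
{
	uint32_t *p = s;

	while (count--)
		*p++ = v;
	return s;
}

int main(void)
{
	uint32_t row[8];

	/* typical use case: fill a 32bpp framebuffer row with one pixel */
	memset32_model(row, 0xff112233, 8);
	printf("%08x\n", (unsigned)row[3]);	/* ff112233 */
	return 0;
}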
@@ -14,6 +14,7 @@
 #include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 #include <asm/processor.h>
+#include <asm/alternative.h>
 
 #define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
 
@@ -36,20 +37,15 @@ bool arch_vcpu_is_preempted(int cpu);
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-void arch_lock_relax(int cpu);
+void arch_spin_relax(arch_spinlock_t *lock);
 
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-
-static inline void arch_spin_relax(arch_spinlock_t *lock)
-{
-	arch_lock_relax(lock->lock);
-}
+void arch_spin_lock_setup(int cpu);
 
 static inline u32 arch_spin_lockval(int cpu)
 {
-	return ~cpu;
+	return cpu + 1;
 }
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
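The lock value encoding changes here from ~cpu to cpu + 1. Both encodings keep 0 meaning "unlocked", but cpu + 1 additionally fits the owner into a 16-bit halfword, which would explain the sth-based unlock further down. A small userspace model of the new encoding (helper names are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Model of the new lock value: CPU n holds the lock as n + 1. */
static uint32_t lockval_model(int cpu)
{
	return cpu + 1;			/* was ~cpu before this series */
}

static int lock_owner_model(uint32_t lockval)
{
	return (int)(lockval & 0xffff) - 1;	/* -1 when unlocked */
}

int main(void)
{
	uint32_t v = lockval_model(5);

	printf("lockval %u -> owner cpu %d\n", (unsigned)v,
	       lock_owner_model(v));
	return 0;
}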
@@ -65,8 +61,7 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
 	barrier();
-	return likely(arch_spin_value_unlocked(*lp) &&
-		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
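The fast path now attempts the compare-and-swap directly instead of first testing arch_spin_value_unlocked(). Both variants rest on __atomic_cmpxchg_bool() from arch/s390/include/asm/atomic_ops.h, a compare-and-swap that returns true on success. A userspace model of its semantics using the GCC atomic builtins (the kernel version is essentially inline cs assembly):

#include <stdbool.h>
#include <stdio.h>

/* Model of __atomic_cmpxchg_bool(): swap *ptr from old to new,
 * return true iff *ptr still contained old. */
static bool cmpxchg_bool_model(int *ptr, int old, int new)
{
	return __atomic_compare_exchange_n(ptr, &old, new, false,
					   __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
}

int main(void)
{
	int lock = 0;

	/* new fast path: 0 -> lockval in one atomic step, no pre-read */
	printf("first try:  %d\n", cmpxchg_bool_model(&lock, 0, 6));
	printf("second try: %d\n", cmpxchg_bool_model(&lock, 0, 6));
	return 0;
}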
@@ -79,7 +74,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 					unsigned long flags)
 {
 	if (!arch_spin_trylock_once(lp))
-		arch_spin_lock_wait_flags(lp, flags);
+		arch_spin_lock_wait(lp);
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lp)
@@ -93,11 +88,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
 	typecheck(int, lp->lock);
 	asm volatile(
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
-		"	.long	0xb2fa0070\n"	/* NIAI 7 */
-#endif
-		"	st	%1,%0\n"
-		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
+		ALTERNATIVE("", ".long 0xb2fa0070", 49)	/* NIAI 7 */
+		"	sth	%1,%0\n"
+		: "=Q" (((unsigned short *) &lp->lock)[1])
+		: "d" (0) : "cc", "memory");
 }
 
 /*
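Two things change in the unlock path. First, the compile-time CONFIG_HAVE_MARCH_ZEC12_FEATURES guard around the NIAI hint becomes the new s390 ALTERNATIVE() mechanism, which patches the instruction in at boot only when facility 49 is installed, so one kernel image serves all machine generations. Second, the unlock stores a halfword instead of a word: on big-endian s390, ((unsigned short *) &lp->lock)[1] is the low 16 bits of the lock word, so clearing it releases the owner while leaving the upper halfword, apparently used by the new spinlock wait queueing, untouched. A userspace model of that second point (the bit layout shown is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

/* Model of the halfword unlock: clear only the owner half of the
 * 32-bit lock word, preserving queueing state in the upper half. */
static void spin_unlock_model(uint32_t *lock)
{
	*lock &= 0xffff0000;	/* what "sth %1,<low halfword>" achieves */
}

int main(void)
{
	uint32_t lock = 0x00020006;	/* upper: queue state, lower: owner */

	spin_unlock_model(&lock);
	printf("%08x\n", (unsigned)lock);	/* 00020000: queue kept */
	return 0;
}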
@@ -115,164 +109,63 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) (((x)->cnts & 0xffff0000) == 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
-extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
-extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
+#define arch_write_can_lock(x) ((x)->cnts == 0)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+#define arch_read_relax(rw) barrier()
+#define arch_write_relax(rw) barrier()
 
-static inline int arch_read_trylock_once(arch_rwlock_t *rw)
-{
-	int old = ACCESS_ONCE(rw->lock);
-	return likely(old >= 0 &&
-		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
-}
-
-static inline int arch_write_trylock_once(arch_rwlock_t *rw)
-{
-	int old = ACCESS_ONCE(rw->lock);
-	return likely(old == 0 &&
-		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
-}
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __RAW_OP_OR	"lao"
-#define __RAW_OP_AND	"lan"
-#define __RAW_OP_ADD	"laa"
-
-#define __RAW_LOCK(ptr, op_val, op_string)		\
-({							\
-	int old_val;					\
-							\
-	typecheck(int *, ptr);				\
-	asm volatile(					\
-		op_string "	%0,%2,%1\n"		\
-		"bcr	14,0\n"				\
-		: "=d" (old_val), "+Q" (*ptr)		\
-		: "d" (op_val)				\
-		: "cc", "memory");			\
-	old_val;					\
-})
-
-#define __RAW_UNLOCK(ptr, op_val, op_string)		\
-({							\
-	int old_val;					\
-							\
-	typecheck(int *, ptr);				\
-	asm volatile(					\
-		op_string "	%0,%2,%1\n"		\
-		: "=d" (old_val), "+Q" (*ptr)		\
-		: "d" (op_val)				\
-		: "cc", "memory");			\
-	old_val;					\
-})
-
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
+void arch_read_lock_wait(arch_rwlock_t *lp);
+void arch_write_lock_wait(arch_rwlock_t *lp);
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	int old;
 
-	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
-	if (old < 0)
-		_raw_read_lock_wait(rw);
+	old = __atomic_add(1, &rw->cnts);
+	if (old & 0xffff0000)
+		arch_read_lock_wait(rw);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+	__atomic_add_const_barrier(-1, &rw->cnts);
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	int old;
-
-	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
-	if (old != 0)
-		_raw_write_lock_wait(rw, old);
-	rw->owner = SPINLOCK_LOCKVAL;
+	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+		arch_write_lock_wait(rw);
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	rw->owner = 0;
-	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+	__atomic_add_barrier(-0x30000, &rw->cnts);
 }
 
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	if (!arch_read_trylock_once(rw))
-		_raw_read_lock_wait(rw);
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	int old;
-
-	do {
-		old = ACCESS_ONCE(rw->lock);
-	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	if (!arch_write_trylock_once(rw))
-		_raw_write_lock_wait(rw);
-	rw->owner = SPINLOCK_LOCKVAL;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	typecheck(int, rw->lock);
-
-	rw->owner = 0;
-	asm volatile(
-		"st	%1,%0\n"
-		: "+Q" (rw->lock)
-		: "d" (0)
-		: "cc", "memory");
-}
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	if (!arch_read_trylock_once(rw))
-		return _raw_read_trylock_retry(rw);
-	return 1;
+	int old;
+
+	old = READ_ONCE(rw->cnts);
+	return (!(old & 0xffff0000) &&
+		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
-		return 0;
-	rw->owner = SPINLOCK_LOCKVAL;
-	return 1;
-}
+	int old;
 
-static inline void arch_read_relax(arch_rwlock_t *rw)
-{
-	arch_lock_relax(rw->owner);
-}
-
-static inline void arch_write_relax(arch_rwlock_t *rw)
-{
-	arch_lock_relax(rw->owner);
+	old = READ_ONCE(rw->cnts);
+	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
 }
 
 #endif /* __ASM_SPINLOCK_H */
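For orientation, the constants in this hunk suggest the new queued-rwlock layout of ->cnts: the low 16 bits count readers, a writer claims the lock by moving the word from 0 to 0x30000, and any bit set in 0xffff0000 blocks new readers. A non-atomic userspace model of the two trylock fast paths (names and exact bit meanings are illustrative assumptions; the real code uses the s390 atomic helpers and the wait functions above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RW_WRITE_LOCKED 0x30000		/* value a writer swaps in */
#define RW_WRITER_MASK  0xffff0000	/* readers must see these as 0 */

/* Model of arch_read_trylock(): join as a reader only if no writer
 * bits are set. */
static bool read_trylock_model(uint32_t *cnts)
{
	uint32_t old = *cnts;		/* model only - not atomic */

	if (old & RW_WRITER_MASK)
		return false;
	*cnts = old + 1;		/* one more reader */
	return true;
}

/* Model of arch_write_trylock(): a writer needs a fully idle lock word. */
static bool write_trylock_model(uint32_t *cnts)
{
	if (*cnts != 0)
		return false;		/* readers or a writer present */
	*cnts = RW_WRITE_LOCKED;
	return true;
}

int main(void)
{
	uint32_t cnts = 0;

	printf("read  -> %d, cnts=%05x\n", read_trylock_model(&cnts),
	       (unsigned)cnts);
	printf("write -> %d, cnts=%05x\n", write_trylock_model(&cnts),
	       (unsigned)cnts);
	return 0;
}

Read unlock is then a plain atomic decrement (__atomic_add_const_barrier(-1, ...)) and write unlock subtracts 0x30000 again, matching the hunk above.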