
Pull locking updates from Ingo Molnar:

 - LKMM updates: mostly documentation changes, but also some new litmus
   tests for atomic ops.

 - KCSAN updates: the most important change is that GCC 11 now has all
   fixes in place to support KCSAN, so GCC support can be enabled again.
   Also more annotations.

 - futex updates: minor cleanups and simplifications

 - seqlock updates: merge preparatory changes/cleanups for the
   'associated locks' facilities.

 - lockdep updates:
    - simplify IRQ trace event handling
    - add various new debug checks
    - simplify header dependencies, split out <linux/lockdep_types.h>,
      decouple lockdep from other low level headers some more
    - fix NMI handling
    - misc cleanups and smaller fixes

* tag 'locking-core-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (60 commits)
  kcsan: Improve IRQ state trace reporting
  lockdep: Refactor IRQ trace events fields into struct
  seqlock: lockdep assert non-preemptibility on seqcount_t write
  lockdep: Add preemption enabled/disabled assertion APIs
  seqlock: Implement raw_seqcount_begin() in terms of raw_read_seqcount()
  seqlock: Add kernel-doc for seqcount_t and seqlock_t APIs
  seqlock: Reorder seqcount_t and seqlock_t API definitions
  seqlock: seqcount_t latch: End read sections with read_seqcount_retry()
  seqlock: Properly format kernel-doc code samples
  Documentation: locking: Describe seqlock design and usage
  locking/qspinlock: Do not include atomic.h from qspinlock_types.h
  locking/atomic: Move ATOMIC_INIT into linux/types.h
  lockdep: Move list.h inclusion into lockdep.h
  locking/lockdep: Fix TRACE_IRQFLAGS vs. NMIs
  futex: Remove unused or redundant includes
  futex: Consistently use fshared as boolean
  futex: Remove needless goto's
  futex: Remove put_futex_key()
  rwsem: fix commas in initialisation
  docs: locking: Replace HTTP links with HTTPS ones
  ...
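As a quick illustration of one item above: a minimal sketch (not taken from
the merge itself) of how the lockdep_assert_preemption_disabled() API added
by "lockdep: Add preemption enabled/disabled assertion APIs" might guard a
seqcount_t write section, the same check that "seqlock: lockdep assert
non-preemptibility on seqcount_t write" wires into the seqlock core. The
names my_seq, my_stats and my_stats_update() are hypothetical.

#include <linux/lockdep.h>
#include <linux/seqlock.h>

static seqcount_t my_seq = SEQCNT_ZERO(my_seq);
static u64 my_stats;

static void my_stats_update(u64 delta)
{
	/* Writers must be non-preemptible; lockdep splats otherwise. */
	lockdep_assert_preemption_disabled();

	write_seqcount_begin(&my_seq);
	my_stats += delta;
	write_seqcount_end(&my_seq);
}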
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <asm/sigp.h>
#include <asm/lowcore.h>
#include <asm/processor.h>

#define raw_smp_processor_id()	(S390_lowcore.cpu_nr)

extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
extern unsigned int smp_cpu_mtid;
extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];

extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
extern void smp_emergency_stop(void);

extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
extern void smp_save_dump_cpus(void);
extern int smp_vcpu_scheduled(int cpu);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern int smp_cpu_get_cpu_address(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);

static inline void smp_stop_cpu(void)
{
	u16 pcpu = stap();

	for (;;) {
		__pcpu_sigp(pcpu, SIGP_STOP, 0, NULL);
		cpu_relax();
	}
}

/* Return thread 0 CPU number as base CPU */
static inline int smp_get_base_cpu(int cpu)
{
	return cpu - (cpu % (smp_cpu_mtid + 1));
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
}

extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
extern int __cpu_disable(void);
extern void schedule_mcck_handler(void);

#endif /* __ASM_SMP_H */
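The smp_get_base_cpu() arithmetic above rounds a CPU number down to the
first hardware thread of its core. A standalone sketch shows the mapping;
the smp_cpu_mtid value of 1 (two threads per core) is hypothetical here,
whereas the kernel derives the real value from the CPU topology at boot.

#include <stdio.h>

/* Assumed for illustration: SMT-2, maximum thread id within a core is 1. */
static unsigned int smp_cpu_mtid = 1;

/* Same arithmetic as smp_get_base_cpu() in the header above. */
static int smp_get_base_cpu(int cpu)
{
	return cpu - (cpu % (smp_cpu_mtid + 1));
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 6; cpu++)
		printf("cpu %d -> base cpu %d\n", cpu, smp_get_base_cpu(cpu));
	/* Prints: 0->0, 1->0, 2->2, 3->2, 4->4, 5->4: both threads of a
	 * core map to the core's first (thread 0) CPU number. */
	return 0;
}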