Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The main bulk of the s390 patches for the 4.10 merge window:

   - Add support for the contiguous memory allocator.

   - The recovery for I/O errors in the dasd device driver is improved,
     the driver will now remove channel paths that are not working
     properly.

   - Additional fields are added to /proc/sysinfo, the extended
     partition name and the partition UUID.

   - New naming for PCI devices with system defined UIDs.

   - The last few remaining alloc_bootmem calls are converted to
     memblock.

   - The thread_info structure is stripped down and moved to the
     task_struct. The only field left in thread_info is the flags field.

   - Rework of the arch topology code to fix a fake numa issue.

   - Refactoring of the atomic primitives and a new preempt_count
     implementation.

   - Clocksource steering for the STP sync check offsets.

   - The s390 specific headers are changed to make them usable with
     CLANG.

   - Bug fixes and cleanup"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (70 commits)
  s390/cpumf: Use configuration level indication for sampling data
  s390: provide memmove implementation
  s390: cleanup arch/s390/kernel Makefile
  s390: fix initrd corruptions with gcov/kcov instrumented kernels
  s390: exclude early C code from gcov profiling
  s390/dasd: channel path aware error recovery
  s390/dasd: extend dasd path handling
  s390: remove unused labels from entry.S
  s390/vmlogrdr: fix IUCV buffer allocation
  s390/crypto: unlock on error in prng_tdes_read()
  s390/sysinfo: show partition extended name and UUID if available
  s390/numa: pin all possible cpus to nodes early
  s390/numa: establish cpu to node mapping early
  s390/topology: use cpu_topology array instead of per cpu variable
  s390/smp: initialize cpu_present_mask in setup_arch
  s390/topology: always use s390 specific sched_domain_topology_level
  s390/smp: use smp_get_base_cpu() helper function
  s390/numa: always use logical cpu and core ids
  s390: Remove VLAIS in ptff() and clear_table()
  s390: fix machine check panic stack switch
  ...
@@ -1,6 +1,6 @@
generic-y += asm-offsets.h
generic-y += clkdev.h
generic-y += dma-contiguous.h
generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
@@ -1 +0,0 @@
#include <generated/asm-offsets.h>
@@ -1,13 +1,8 @@
/*
 * Copyright IBM Corp. 1999, 2009
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

#define __ATOMIC_NO_BARRIER "\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR "lao"
#define __ATOMIC_AND "lan"
#define __ATOMIC_ADD "laa"
#define __ATOMIC_XOR "lax"
#define __ATOMIC_BARRIER "bcr 14,0\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	int old_val; \
 \
	typecheck(atomic_t *, ptr); \
	asm volatile( \
		op_string " %0,%2,%1\n" \
		__barrier \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR "or"
#define __ATOMIC_AND "nr"
#define __ATOMIC_ADD "ar"
#define __ATOMIC_XOR "xr"
#define __ATOMIC_BARRIER "\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	int old_val, new_val; \
 \
	typecheck(atomic_t *, ptr); \
	asm volatile( \
		" l %0,%2\n" \
		"0: lr %1,%0\n" \
		op_string " %1,%3\n" \
		" cs %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int atomic_read(const atomic_t *v)
{
	int c;

@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
	return __atomic_add_barrier(i, &v->counter);
}

static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi %0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
	__atomic_add(i, &v->counter);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

#define ATOMIC_OPS(op, OP) \
#define ATOMIC_OPS(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
	__atomic_##op(i, &v->counter); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
	return __atomic_##op##_barrier(i, &v->counter); \
}

ATOMIC_OPS(and, AND)
ATOMIC_OPS(or, OR)
ATOMIC_OPS(xor, XOR)
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		" cs %0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
	return __atomic_cmpxchg(&v->counter, old, new);
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i) { (i) }

#define __ATOMIC64_NO_BARRIER "\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR "laog"
#define __ATOMIC64_AND "lang"
#define __ATOMIC64_ADD "laag"
#define __ATOMIC64_XOR "laxg"
#define __ATOMIC64_BARRIER "bcr 14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	long long old_val; \
 \
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
		op_string " %0,%2,%1\n" \
		__barrier \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR "ogr"
#define __ATOMIC64_AND "ngr"
#define __ATOMIC64_ADD "agr"
#define __ATOMIC64_XOR "xgr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	long long old_val, new_val; \
 \
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
		" lg %0,%2\n" \
		"0: lgr %1,%0\n" \
		op_string " %1,%3\n" \
		" csg %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
static inline long atomic64_read(const atomic64_t *v)
{
	long long c;
	long c;

	asm volatile(
		" lg %0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
static inline void atomic64_set(atomic64_t *v, long i)
{
	asm volatile(
		" stg %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
static inline long atomic64_add_return(long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
	return __atomic64_add_barrier(i, &v->counter) + i;
}

static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
	return __atomic64_add_barrier(i, &v->counter);
}

static inline void atomic64_add(long long i, atomic64_t *v)
static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi %0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		__atomic64_add_const(i, &v->counter);
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
	__atomic64_add(i, &v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					     long long old, long long new)
static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	asm volatile(
		" csg %0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
	return __atomic64_cmpxchg(&v->counter, old, new);
}

#define ATOMIC64_OPS(op, OP) \
#define ATOMIC64_OPS(op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
	__atomic64_##op(i, &v->counter); \
} \
static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
{ \
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
	return __atomic64_##op##_barrier(i, &v->counter); \
}

ATOMIC64_OPS(and, AND)
ATOMIC64_OPS(or, OR)
ATOMIC64_OPS(xor, XOR)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS
#undef __ATOMIC64_LOOP

static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
{
	long long c, old;
	long c, old;

	c = atomic64_read(v);
	for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
	return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;
	long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
arch/s390/include/asm/atomic_ops.h (new file, 130 lines)
@@ -0,0 +1,130 @@
/*
 * Low level function for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static inline op_type op_name(op_type val, op_type *ptr) \
{ \
	op_type old; \
 \
	asm volatile( \
		op_string " %[old],%[val],%[ptr]\n" \
		op_barrier \
		: [old] "=d" (old), [ptr] "+Q" (*ptr) \
		: [val] "d" (val) : "cc", "memory"); \
	return old; \
} \

#define __ATOMIC_OPS(op_name, op_type, op_string) \
	__ATOMIC_OP(op_name, op_type, op_string, "\n") \
	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")

#undef __ATOMIC_OPS
#undef __ATOMIC_OP

static inline void __atomic_add_const(int val, int *ptr)
{
	asm volatile(
		" asi %[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

static inline void __atomic64_add_const(long val, long *ptr)
{
	asm volatile(
		" agsi %[ptr],%[val]\n"
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \
	int old, new; \
 \
	asm volatile( \
		"0: lr %[new],%[old]\n" \
		op_string " %[new],%[val]\n" \
		" cs %[old],%[new],%[ptr]\n" \
		" jl 0b" \
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
	return old; \
}

#define __ATOMIC_OPS(op_name, op_string) \
	__ATOMIC_OP(op_name, op_string) \
	__ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \
	long old, new; \
 \
	asm volatile( \
		"0: lgr %[new],%[old]\n" \
		op_string " %[new],%[val]\n" \
		" csg %[old],%[new],%[ptr]\n" \
		" jl 0b" \
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
	return old; \
}

#define __ATOMIC64_OPS(op_name, op_string) \
	__ATOMIC64_OP(op_name, op_string) \
	__ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	asm volatile(
		" cs %[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new) : "cc", "memory");
	return old;
}

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
	asm volatile(
		" csg %[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new) : "cc", "memory");
	return old;
}

#endif /* __ARCH_S390_ATOMIC_OPS__ */
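For orientation, every helper generated above returns the previous value of *ptr and comes in two flavours: a relaxed one (__atomic_add, __atomic64_or, ...) and a fully ordered one (the _barrier variants, which append "bcr 14,0" on z196 and later, or reuse the compare-and-swap loop on older machines). A minimal usage sketch; the helper name below is made up and is not part of the patch:

static inline int example_set_flag(int *flags, int flag)
{
	/* interlocked OR; returns the value *flags held before the update */
	return __atomic_or_barrier(flag, flags);
}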
@@ -42,57 +42,9 @@

#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>

#define __BITOPS_NO_BARRIER "\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __BITOPS_OR "laog"
#define __BITOPS_AND "lang"
#define __BITOPS_XOR "laxg"
#define __BITOPS_BARRIER "bcr 14,0\n"

#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
	unsigned long __old; \
 \
	typecheck(unsigned long *, (__addr)); \
	asm volatile( \
		__op_string " %0,%2,%1\n" \
		__barrier \
		: "=d" (__old), "+Q" (*(__addr)) \
		: "d" (__val) \
		: "cc", "memory"); \
	__old; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
#define __BITOPS_BARRIER "\n"

#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
	unsigned long __old, __new; \
 \
	typecheck(unsigned long *, (__addr)); \
	asm volatile( \
		" lg %0,%2\n" \
		"0: lgr %1,%0\n" \
		__op_string " %1,%3\n" \
		" csg %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
		: "d" (__val) \
		: "cc", "memory"); \
	__old; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
	}
#endif
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
	__atomic64_or(mask, addr);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
	}
#endif
	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
	__atomic64_and(mask, addr);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
	}
#endif
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
	__atomic64_xor(mask, addr);
}

static inline int
@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
	unsigned long old, mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
	old = __atomic64_or_barrier(mask, addr);
	return (old & mask) != 0;
}

@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
	unsigned long old, mask;

	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
	old = __atomic64_and_barrier(mask, addr);
	return (old & ~mask) != 0;
}

@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
	unsigned long old, mask;

	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
	old = __atomic64_xor_barrier(mask, addr);
	return (old & mask) != 0;
}
@@ -104,7 +104,8 @@ struct hws_basic_entry {
	unsigned int P:1;		/* 28 PSW Problem state */
	unsigned int AS:2;		/* 29-30 PSW address-space control */
	unsigned int I:1;		/* 31 entry valid or invalid */
	unsigned int:16;
	unsigned int CL:2;		/* 32-33 Configuration Level */
	unsigned int:14;
	unsigned int prim_asn:16;	/* primary ASN */
	unsigned long long ia;		/* Instruction Address */
	unsigned long long gpp;		/* Guest Program Parameter */
@@ -193,7 +193,7 @@ extern char elf_platform[];
do { \
	set_personality(PER_LINUX | \
		(current->personality & (~PER_MASK))); \
	current_thread_info()->sys_call_table = \
	current->thread.sys_call_table = \
		(unsigned long) &sys_call_table; \
} while (0)
#else /* CONFIG_COMPAT */
@@ -204,11 +204,11 @@ do { \
		(current->personality & ~PER_MASK)); \
	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \
		set_thread_flag(TIF_31BIT); \
		current_thread_info()->sys_call_table = \
		current->thread.sys_call_table = \
			(unsigned long) &sys_call_table_emu; \
	} else { \
		clear_thread_flag(TIF_31BIT); \
		current_thread_info()->sys_call_table = \
		current->thread.sys_call_table = \
			(unsigned long) &sys_call_table; \
	} \
} while (0)
@@ -1,82 +0,0 @@
|
||||
/*
|
||||
* Copyright IBM Corp. 2015
|
||||
*/
|
||||
|
||||
#ifndef S390_GEN_FACILITIES_C
|
||||
#error "This file can only be included by gen_facilities.c"
|
||||
#endif
|
||||
|
||||
#include <linux/kconfig.h>
|
||||
|
||||
struct facility_def {
|
||||
char *name;
|
||||
int *bits;
|
||||
};
|
||||
|
||||
static struct facility_def facility_defs[] = {
|
||||
{
|
||||
/*
|
||||
* FACILITIES_ALS contains the list of facilities that are
|
||||
* required to run a kernel that is compiled e.g. with
|
||||
* -march=<machine>.
|
||||
*/
|
||||
.name = "FACILITIES_ALS",
|
||||
.bits = (int[]){
|
||||
#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
|
||||
0, /* N3 instructions */
|
||||
1, /* z/Arch mode installed */
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
|
||||
18, /* long displacement facility */
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
|
||||
7, /* stfle */
|
||||
17, /* message security assist */
|
||||
21, /* extended-immediate facility */
|
||||
25, /* store clock fast */
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
|
||||
27, /* mvcos */
|
||||
32, /* compare and swap and store */
|
||||
33, /* compare and swap and store 2 */
|
||||
34, /* general extension facility */
|
||||
35, /* execute extensions */
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
|
||||
45, /* fast-BCR, etc. */
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
|
||||
49, /* misc-instruction-extensions */
|
||||
52, /* interlocked facility 2 */
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
|
||||
53, /* load-and-zero-rightmost-byte, etc. */
|
||||
#endif
|
||||
-1 /* END */
|
||||
}
|
||||
},
|
||||
{
|
||||
.name = "FACILITIES_KVM",
|
||||
.bits = (int[]){
|
||||
0, /* N3 instructions */
|
||||
1, /* z/Arch mode installed */
|
||||
2, /* z/Arch mode active */
|
||||
3, /* DAT-enhancement */
|
||||
4, /* idte segment table */
|
||||
5, /* idte region table */
|
||||
6, /* ASN-and-LX reuse */
|
||||
7, /* stfle */
|
||||
8, /* enhanced-DAT 1 */
|
||||
9, /* sense-running-status */
|
||||
10, /* conditional sske */
|
||||
13, /* ipte-range */
|
||||
14, /* nonquiescing key-setting */
|
||||
73, /* transactional execution */
|
||||
75, /* access-exception-fetch/store indication */
|
||||
76, /* msa extension 3 */
|
||||
77, /* msa extension 4 */
|
||||
78, /* enhanced-DAT 2 */
|
||||
-1 /* END */
|
||||
}
|
||||
},
|
||||
};
|
@@ -97,7 +97,7 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void do_reipl(void);
extern void do_halt(void);
extern void do_poff(void);
extern void ipl_save_parameters(void);
extern void ipl_verify_parameters(void);
extern void ipl_update_parameters(void);
extern size_t append_ipl_vmparm(char *, size_t);
extern size_t append_ipl_scpdata(char *, size_t);
@@ -95,7 +95,7 @@ struct lowcore {

	/* Current process. */
	__u64	current_task;			/* 0x0310 */
	__u64	thread_info;			/* 0x0318 */
	__u8	pad_0x318[0x320-0x318];		/* 0x0318 */
	__u64	kernel_stack;			/* 0x0320 */

	/* Interrupt, panic and restart stack. */
@@ -126,7 +126,8 @@ struct lowcore {
	__u64	percpu_offset;			/* 0x0378 */
	__u64	vdso_per_cpu_data;		/* 0x0380 */
	__u64	machine_flags;			/* 0x0388 */
	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
	__u32	preempt_count;			/* 0x0390 */
	__u8	pad_0x0394[0x0398-0x0394];	/* 0x0394 */
	__u64	gmap;				/* 0x0398 */
	__u32	spinlock_lockval;		/* 0x03a0 */
	__u32	fpu_flags;			/* 0x03a4 */
@@ -46,6 +46,8 @@ struct clp_fh_list_entry {
#define CLP_UTIL_STR_LEN	64
#define CLP_PFIP_NR_SEGMENTS	4

extern bool zpci_unique_uid;

/* List PCI functions request */
struct clp_req_list_pci {
	struct clp_req_hdr hdr;
@@ -59,7 +61,8 @@ struct clp_rsp_list_pci {
	u64 resume_token;
	u32 reserved2;
	u16 max_fn;
	u8 reserved3;
	u8 : 7;
	u8 uid_checking : 1;
	u8 entry_size;
	struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
} __packed;
@@ -27,17 +27,17 @@ extern int page_table_allocate_pgste;

static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	typedef struct { char _[n]; } addrtype;
	struct addrtype { char _[256]; };
	int i;

	*s = val;
	n = (n / 256) - 1;
	asm volatile(
		" mvc 8(248,%0),0(%0)\n"
		"0: mvc 256(256,%0),0(%0)\n"
		" la %0,256(%0)\n"
		" brct %1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
	for (i = 0; i < n; i += 256) {
		*s = val;
		asm volatile(
			"mvc 8(248,%[s]),0(%[s])\n"
			: "+m" (*(struct addrtype *) s)
			: [s] "a" (s));
		s += 256 / sizeof(long);
	}
}

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
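The clear_table() rewrite above removes a variable-length array in a struct (VLAIS), which clang rejects, by using a fixed 256-byte addrtype and an explicit loop; the mvc-based propagation of the first 8 bytes across each 256-byte block stays. As a plain-C reference of the net effect (illustration only, assuming n is a multiple of 256 as the callers guarantee):

static inline void clear_table_reference(unsigned long *s, unsigned long val, size_t n)
{
	size_t i;

	/* fill the n-byte table with val, one word at a time */
	for (i = 0; i < n / sizeof(*s); i++)
		s[i] = val;
}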
arch/s390/include/asm/preempt.h (new file, 137 lines)
@@ -0,0 +1,137 @@
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = READ_ONCE(S390_lowcore.preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
				  old, new) != old);
}

#define init_task_preempt_count(p)	do { } while (0)

#define init_idle_preempt_count(p, cpu)	do { \
	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
} while (0)

static inline void set_preempt_need_resched(void)
{
	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}

static inline void __preempt_count_add(int val)
{
	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
		__atomic_add_const(val, &S390_lowcore.preempt_count);
	else
		__atomic_add(val, &S390_lowcore.preempt_count);
}

static inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}

static inline bool __preempt_count_dec_and_test(void)
{
	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}

static inline bool should_resched(int preempt_offset)
{
	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
			preempt_offset);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define PREEMPT_ENABLED	(0)

static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count);
}

static inline void preempt_count_set(int pc)
{
	S390_lowcore.preempt_count = pc;
}

#define init_task_preempt_count(p)	do { } while (0)

#define init_idle_preempt_count(p, cpu)	do { \
	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
} while (0)

static inline void set_preempt_need_resched(void)
{
}

static inline void clear_preempt_need_resched(void)
{
}

static inline bool test_preempt_need_resched(void)
{
	return false;
}

static inline void __preempt_count_add(int val)
{
	S390_lowcore.preempt_count += val;
}

static inline void __preempt_count_sub(int val)
{
	S390_lowcore.preempt_count -= val;
}

static inline bool __preempt_count_dec_and_test(void)
{
	return !--S390_lowcore.preempt_count && tif_need_resched();
}

static inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */
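For context, the generic code in include/linux/preempt.h consumes these helpers roughly as sketched below (heavily simplified, macro names prefixed with my_ to mark them as illustration only); on z196 and newer the constant add of 1 or -1 compiles down to an asi on the lowcore preempt_count field:

#define my_preempt_disable() \
do { \
	__preempt_count_add(1);	/* constant, so __atomic_add_const / asi */ \
	barrier(); \
} while (0)

#define my_preempt_enable() \
do { \
	barrier(); \
	if (__preempt_count_dec_and_test())	/* dropped to zero with resched pending? */ \
		__preempt_schedule(); \
} while (0)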
@@ -110,14 +110,20 @@ typedef struct {
struct thread_struct {
	unsigned int acrs[NUM_ACRS];
	unsigned long ksp;		/* kernel stack pointer */
	unsigned long user_timer;	/* task cputime in user space */
	unsigned long system_timer;	/* task cputime in kernel space */
	unsigned long sys_call_table;	/* system call table address */
	mm_segment_t mm_segment;
	unsigned long gmap_addr;	/* address of last gmap fault. */
	unsigned int gmap_write_flag;	/* gmap fault write indication */
	unsigned int gmap_int_code;	/* int code of last gmap fault */
	unsigned int gmap_pfault;	/* signal of a pending guest pfault */
	/* Per-thread information related to debugging */
	struct per_regs per_user;	/* User specified PER registers */
	struct per_event per_event;	/* Cause of the last PER trap */
	unsigned long per_flags;	/* Flags to control debug behavior */
	unsigned int system_call;	/* system call number in signal */
	unsigned long last_break;	/* last breaking-event-address. */
	/* pfault_wait is used to block the process on a pfault event */
	unsigned long pfault_wait;
	struct list_head list;
@@ -101,7 +101,8 @@ struct zpci_report_error_header {
	u8 data[0];	/* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;

int sclp_get_core_info(struct sclp_core_info *info);
int _sclp_get_core_info_early(struct sclp_core_info *info);
int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
int sclp_sdias_blk_count(void);
@@ -119,4 +120,11 @@ void sclp_early_detect(void);
void _sclp_print_early(const char *);
void sclp_ocf_cpc_name_copy(char *dst);

static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
{
	if (early)
		return _sclp_get_core_info_early(info);
	return _sclp_get_core_info(info);
}

#endif /* _ASM_S390_SCLP_H */
@@ -96,7 +96,8 @@ struct tm_scsw {
	u32 dstat:8;
	u32 cstat:8;
	u32 fcxs:8;
	u32 schxs:8;
	u32 ifob:1;
	u32 sesq:7;
} __attribute__ ((packed));

/**
@@ -177,6 +178,9 @@ union scsw {
#define SCHN_STAT_INTF_CTRL_CHK		0x02
#define SCHN_STAT_CHAIN_CHECK		0x01

#define SCSW_SESQ_DEV_NOFCX	3
#define SCSW_SESQ_PATH_NOFCX	4

/*
 * architectured values for first sense byte
 */
@@ -36,6 +36,7 @@ extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);

#else /* CONFIG_SMP */

@@ -56,6 +57,7 @@ static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_fill_possible_mask(void) { }
static inline void smp_detect_cpus(void) { }

#endif /* CONFIG_SMP */

@@ -69,6 +71,12 @@ static inline void smp_stop_cpu(void)
	}
}

/* Return thread 0 CPU number as base CPU */
static inline int smp_get_base_cpu(int cpu)
{
	return cpu - (cpu % (smp_cpu_mtid + 1));
}

#ifdef CONFIG_HOTPLUG_CPU
extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
@@ -14,6 +14,7 @@
#define __HAVE_ARCH_MEMCHR	/* inline & arch function */
#define __HAVE_ARCH_MEMCMP	/* arch function */
#define __HAVE_ARCH_MEMCPY	/* gcc builtin & arch function */
#define __HAVE_ARCH_MEMMOVE	/* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN	/* inline & arch function */
#define __HAVE_ARCH_MEMSET	/* gcc builtin & arch function */
#define __HAVE_ARCH_STRCAT	/* inline & arch function */
@@ -32,6 +33,7 @@
extern int memcmp(const void *, const void *, size_t);
extern void *memcpy(void *, const void *, size_t);
extern void *memset(void *, int, size_t);
extern void *memmove(void *, const void *, size_t);
extern int strcmp(const char *,const char *);
extern size_t strlcat(char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
@@ -40,7 +42,6 @@ extern char *strncpy(char *, const char *, size_t);
extern char *strrchr(const char *, int);
extern char *strstr(const char *, const char *);

#undef __HAVE_ARCH_MEMMOVE
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
#undef __HAVE_ARCH_STRNCMP
@@ -107,6 +107,11 @@ struct sysinfo_2_2_2 {
	char reserved_3[5];
	unsigned short cpus_dedicated;
	unsigned short cpus_shared;
	char reserved_4[3];
	unsigned char vsne;
	uuid_be uuid;
	char reserved_5[160];
	char ext_name[256];
};

#define LPAR_CHAR_DEDICATED	(1 << 7)
@@ -127,7 +132,7 @@ struct sysinfo_3_2_2 {
		unsigned int caf;
		char cpi[16];
		char reserved_1[3];
		char ext_name_encoding;
		unsigned char evmne;
		unsigned int reserved_2;
		uuid_be uuid;
	} vm[8];
@@ -12,10 +12,10 @@
/*
 * Size of kernel stack for each process
 */
#define THREAD_ORDER 2
#define THREAD_SIZE_ORDER 2
#define ASYNC_ORDER 2

#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)

#ifndef __ASSEMBLY__
@@ -30,15 +30,7 @@
 * - if the contents of this structure are changed, the assembly constants must also be changed
 */
struct thread_info {
	struct task_struct *task;	/* main task structure */
	unsigned long flags;		/* low level flags */
	unsigned long sys_call_table;	/* System call table address */
	unsigned int cpu;		/* current CPU */
	int preempt_count;		/* 0 => preemptable, <0 => BUG */
	unsigned int system_call;
	__u64 user_timer;
	__u64 system_timer;
	unsigned long last_break;	/* last breaking-event-address. */
};

/*
@@ -46,26 +38,14 @@ struct thread_info {
 */
#define INIT_THREAD_INFO(tsk) \
{ \
	.task		= &tsk, \
	.flags		= 0, \
	.cpu		= 0, \
	.preempt_count	= INIT_PREEMPT_COUNT, \
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	return (struct thread_info *) S390_lowcore.thread_info;
}

void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);

#define THREAD_SIZE_ORDER THREAD_ORDER

#endif

/*
@@ -52,11 +52,9 @@ static inline void store_clock_comparator(__u64 *time)

void clock_comparator_work(void);

void __init ptff_init(void);
void __init time_early_init(void);

extern unsigned char ptff_function_mask[16];
extern unsigned long lpar_offset;
extern unsigned long initial_leap_seconds;

/* Function codes for the ptff instruction. */
#define PTFF_QAF	0x00	/* query available functions */
@@ -100,21 +98,28 @@ struct ptff_qui {
	unsigned int pad_0x5c[41];
} __packed;

static inline int ptff(void *ptff_block, size_t len, unsigned int func)
{
	typedef struct { char _[len]; } addrtype;
	register unsigned int reg0 asm("0") = func;
	register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
	int rc;

	asm volatile(
		" .word 0x0104\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (rc), "+m" (*(addrtype *) ptff_block)
		: "d" (reg0), "d" (reg1) : "cc");
	return rc;
}
/*
 * ptff - Perform timing facility function
 * @ptff_block:	Pointer to ptff parameter block
 * @len:	Length of parameter block
 * @func:	Function code
 * Returns:	Condition code (0 on success)
 */
#define ptff(ptff_block, len, func) \
({ \
	struct addrtype { char _[len]; }; \
	register unsigned int reg0 asm("0") = func; \
	register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
	int rc; \
 \
	asm volatile( \
		" .word 0x0104\n" \
		" ipm %0\n" \
		" srl %0,28\n" \
		: "=d" (rc), "+m" (*(struct addrtype *) reg1) \
		: "d" (reg0), "d" (reg1) : "cc"); \
	rc; \
})

static inline unsigned long long local_tick_disable(void)
{
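The ptff() change has the same motivation as the clear_table() one: the old inline function declared a typedef containing a variable-length array in a struct, which clang does not accept, so the operation becomes a macro that declares struct addrtype per call site. Call sites keep the same shape; a hypothetical one, modelled loosely on arch/s390/kernel/time.c and assuming the PTFF_QUI function code defined elsewhere in this header:

static int example_query_utc_info(struct ptff_qui *qui)
{
	/* condition code 0 means success, per the kerneldoc comment above */
	return ptff(qui, sizeof(*qui), PTFF_QUI) ? -EIO : 0;
}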
@@ -22,21 +22,22 @@ struct cpu_topology_s390 {
	cpumask_t drawer_mask;
};

DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
extern cpumask_t cpus_with_topology;

#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
#define topology_thread_id(cpu)		  (per_cpu(cpu_topology, cpu).thread_id)
#define topology_sibling_cpumask(cpu) \
		(&per_cpu(cpu_topology, cpu).thread_mask)
#define topology_core_id(cpu)		  (per_cpu(cpu_topology, cpu).core_id)
#define topology_core_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).core_mask)
#define topology_book_id(cpu)		  (per_cpu(cpu_topology, cpu).book_id)
#define topology_book_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).book_mask)
#define topology_drawer_id(cpu)		  (per_cpu(cpu_topology, cpu).drawer_id)
#define topology_drawer_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).drawer_mask)
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
#define topology_thread_id(cpu)		  (cpu_topology[cpu].thread_id)
#define topology_sibling_cpumask(cpu)	  (&cpu_topology[cpu].thread_mask)
#define topology_core_id(cpu)		  (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu)	  (&cpu_topology[cpu].core_mask)
#define topology_book_id(cpu)		  (cpu_topology[cpu].book_id)
#define topology_book_cpumask(cpu)	  (&cpu_topology[cpu].book_mask)
#define topology_drawer_id(cpu)		  (cpu_topology[cpu].drawer_id)
#define topology_drawer_cpumask(cpu)	  (&cpu_topology[cpu].drawer_mask)

#define mc_capable() 1

void topology_init_early(void);
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
@@ -46,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);

#else /* CONFIG_SCHED_TOPOLOGY */

static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
static inline void topology_expect_change(void) { }
@@ -65,7 +67,7 @@ static inline void topology_expect_change(void) { }
#define cpu_to_node cpu_to_node
static inline int cpu_to_node(int cpu)
{
	return per_cpu(cpu_topology, cpu).node_id;
	return cpu_topology[cpu].node_id;
}

/* Returns a pointer to the cpumask of CPUs on node 'node'. */
@@ -37,14 +37,14 @@
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)

#define set_fs(x) \
({ \
#define set_fs(x) \
{ \
	unsigned long __pto; \
	current->thread.mm_segment = (x); \
	__pto = current->thread.mm_segment.ar4 ? \
		S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
	__ctl_load(__pto, 7, 7); \
})
}

#define segment_eq(a,b) ((a).ar4 == (b).ar4)
@@ -33,6 +33,8 @@ struct vdso_data {
	__u32 ectg_available;		/* ECTG instruction present	0x58 */
	__u32 tk_mult;			/* Mult. used for xtime_nsec	0x5c */
	__u32 tk_shift;			/* Shift used for xtime_nsec	0x60 */
	__u32 ts_dir;			/* TOD steering direction	0x64 */
	__u64 ts_end;			/* TOD steering end		0x68 */
};

struct vdso_per_cpu_data {