Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:

 - The remaining patches for the z13 machine support: kernel build
   option for z13, the cache synonym avoidance, SMT support,
   compare-and-delay for spinloops and the CEX5S crypto adapter.

 - The ftrace support for function tracing with the gcc hotpatch option.
   This touches common code Makefiles; Steven is ok with the changes.

 - The hypfs file system gets an extension to access diagnose 0x0c data
   in user space for performance analysis for Linux running under z/VM.

 - The iucv hvc console gets wildcard support for the user id filtering.

 - The cacheinfo code is converted to use the generic infrastructure.

 - Cleanup and bug fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/process: free vx save area when releasing tasks
  s390/hypfs: Eliminate hypfs interval
  s390/hypfs: Add diagnose 0c support
  s390/cacheinfo: don't use smp_processor_id() in preemptible context
  s390/zcrypt: fixed domain scanning problem (again)
  s390/smp: increase maximum value of NR_CPUS to 512
  s390/jump label: use different nop instruction
  s390/jump label: add sanity checks
  s390/mm: correct missing space when reporting user process faults
  s390/dasd: cleanup profiling
  s390/dasd: add locking for global_profile access
  s390/ftrace: hotpatch support for function tracing
  ftrace: let notrace function attribute disable hotpatching if necessary
  ftrace: allow architectures to specify ftrace compile options
  s390: reintroduce diag 44 calls for cpu_relax()
  s390/zcrypt: Add support for new crypto express (CEX5S) adapter.
  s390/zcrypt: Number of supported ap domains is not retrievable.
  s390/spinlock: add compare-and-delay to lock wait loops
  s390/tape: remove redundant if statement
  s390/hvc_iucv: add simple wildcard matches to the iucv allow filter
  ...
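As a quick illustration of the hotpatch-based ftrace mechanism mentioned above (a sketch based on the comment this series adds to the s390 ftrace code in the diff below): with gcc's hotpatch option every traceable function starts with a single six-byte nop at offset 0, which ftrace later rewrites in place to call the tracer:

    brcl	0,0			# disabled state: six-byte nop at offset 0
    brasl	%r0,ftrace_caller	# enabled state: patched branch to the ftrace trampoline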
@@ -4,8 +4,8 @@
ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
CFLAGS_REMOVE_early.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif

#
@@ -97,7 +97,8 @@ ENTRY(diag308_reset)
lg %r4,0(%r4) # Save PSW
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
diag %r1,%r1,0x308
lghi %r0,0
diag %r0,%r1,0x308
.Lrestart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
@@ -5,37 +5,11 @@
|
||||
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cacheinfo.h>
|
||||
#include <asm/facility.h>
|
||||
|
||||
struct cache {
|
||||
unsigned long size;
|
||||
unsigned int line_size;
|
||||
unsigned int associativity;
|
||||
unsigned int nr_sets;
|
||||
unsigned int level : 3;
|
||||
unsigned int type : 2;
|
||||
unsigned int private : 1;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct cache_dir {
|
||||
struct kobject *kobj;
|
||||
struct cache_index_dir *index;
|
||||
};
|
||||
|
||||
struct cache_index_dir {
|
||||
struct kobject kobj;
|
||||
int cpu;
|
||||
struct cache *cache;
|
||||
struct cache_index_dir *next;
|
||||
};
|
||||
|
||||
enum {
|
||||
CACHE_SCOPE_NOTEXISTS,
|
||||
CACHE_SCOPE_PRIVATE,
|
||||
@@ -44,10 +18,10 @@ enum {
|
||||
};
|
||||
|
||||
enum {
|
||||
CACHE_TYPE_SEPARATE,
|
||||
CACHE_TYPE_DATA,
|
||||
CACHE_TYPE_INSTRUCTION,
|
||||
CACHE_TYPE_UNIFIED,
|
||||
CTYPE_SEPARATE,
|
||||
CTYPE_DATA,
|
||||
CTYPE_INSTRUCTION,
|
||||
CTYPE_UNIFIED,
|
||||
};
|
||||
|
||||
enum {
|
||||
@@ -70,37 +44,60 @@ struct cache_info {
|
||||
};
|
||||
|
||||
#define CACHE_MAX_LEVEL 8
|
||||
|
||||
union cache_topology {
|
||||
struct cache_info ci[CACHE_MAX_LEVEL];
|
||||
unsigned long long raw;
|
||||
};
|
||||
|
||||
static const char * const cache_type_string[] = {
|
||||
"Data",
|
||||
"",
|
||||
"Instruction",
|
||||
"Data",
|
||||
"",
|
||||
"Unified",
|
||||
};
|
||||
|
||||
static struct cache_dir *cache_dir_cpu[NR_CPUS];
|
||||
static LIST_HEAD(cache_list);
|
||||
static const enum cache_type cache_type_map[] = {
|
||||
[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
|
||||
[CTYPE_DATA] = CACHE_TYPE_DATA,
|
||||
[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
|
||||
[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
|
||||
};
|
||||
|
||||
void show_cacheinfo(struct seq_file *m)
|
||||
{
|
||||
struct cache *cache;
|
||||
int index = 0;
|
||||
struct cpu_cacheinfo *this_cpu_ci;
|
||||
struct cacheinfo *cache;
|
||||
int idx;
|
||||
|
||||
list_for_each_entry(cache, &cache_list, list) {
|
||||
seq_printf(m, "cache%-11d: ", index);
|
||||
get_online_cpus();
|
||||
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
|
||||
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
|
||||
cache = this_cpu_ci->info_list + idx;
|
||||
seq_printf(m, "cache%-11d: ", idx);
|
||||
seq_printf(m, "level=%d ", cache->level);
|
||||
seq_printf(m, "type=%s ", cache_type_string[cache->type]);
|
||||
seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
|
||||
seq_printf(m, "size=%luK ", cache->size >> 10);
|
||||
seq_printf(m, "line_size=%u ", cache->line_size);
|
||||
seq_printf(m, "associativity=%d", cache->associativity);
|
||||
seq_printf(m, "scope=%s ",
|
||||
cache->disable_sysfs ? "Shared" : "Private");
|
||||
seq_printf(m, "size=%dK ", cache->size >> 10);
|
||||
seq_printf(m, "line_size=%u ", cache->coherency_line_size);
|
||||
seq_printf(m, "associativity=%d", cache->ways_of_associativity);
|
||||
seq_puts(m, "\n");
|
||||
index++;
|
||||
}
|
||||
put_online_cpus();
|
||||
}
|
||||
|
||||
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
|
||||
{
|
||||
if (level >= CACHE_MAX_LEVEL)
|
||||
return CACHE_TYPE_NOCACHE;
|
||||
|
||||
ci += level;
|
||||
|
||||
if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
|
||||
return CACHE_TYPE_NOCACHE;
|
||||
|
||||
return cache_type_map[ci->type];
|
||||
}
|
||||
|
||||
static inline unsigned long ecag(int ai, int li, int ti)
|
||||
@@ -113,277 +110,79 @@ static inline unsigned long ecag(int ai, int li, int ti)
|
||||
return val;
|
||||
}
|
||||
|
||||
static int __init cache_add(int level, int private, int type)
|
||||
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
|
||||
enum cache_type type, unsigned int level)
|
||||
{
|
||||
struct cache *cache;
|
||||
int ti;
|
||||
int ti, num_sets;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
|
||||
if (!cache)
|
||||
return -ENOMEM;
|
||||
if (type == CACHE_TYPE_INSTRUCTION)
|
||||
if (type == CACHE_TYPE_INST)
|
||||
ti = CACHE_TI_INSTRUCTION;
|
||||
else
|
||||
ti = CACHE_TI_UNIFIED;
|
||||
cache->size = ecag(EXTRACT_SIZE, level, ti);
|
||||
cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
|
||||
cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
|
||||
cache->nr_sets = cache->size / cache->associativity;
|
||||
cache->nr_sets /= cache->line_size;
|
||||
cache->private = private;
|
||||
cache->level = level + 1;
|
||||
cache->type = type - 1;
|
||||
list_add_tail(&cache->list, &cache_list);
|
||||
return 0;
|
||||
|
||||
this_leaf->level = level + 1;
|
||||
this_leaf->type = type;
|
||||
this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
|
||||
this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
|
||||
level, ti);
|
||||
this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
|
||||
|
||||
num_sets = this_leaf->size / this_leaf->coherency_line_size;
|
||||
num_sets /= this_leaf->ways_of_associativity;
|
||||
this_leaf->number_of_sets = num_sets;
|
||||
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
|
||||
if (!private)
|
||||
this_leaf->disable_sysfs = true;
|
||||
}
|
||||
|
||||
static void __init cache_build_info(void)
|
||||
int init_cache_level(unsigned int cpu)
|
||||
{
|
||||
struct cache *cache, *next;
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
unsigned int level = 0, leaves = 0;
|
||||
union cache_topology ct;
|
||||
int level, private, rc;
|
||||
enum cache_type ctype;
|
||||
|
||||
if (!this_cpu_ci)
|
||||
return -EINVAL;
|
||||
|
||||
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
|
||||
for (level = 0; level < CACHE_MAX_LEVEL; level++) {
|
||||
switch (ct.ci[level].scope) {
|
||||
case CACHE_SCOPE_SHARED:
|
||||
private = 0;
|
||||
do {
|
||||
ctype = get_cache_type(&ct.ci[0], level);
|
||||
if (ctype == CACHE_TYPE_NOCACHE)
|
||||
break;
|
||||
case CACHE_SCOPE_PRIVATE:
|
||||
private = 1;
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
|
||||
rc = cache_add(level, private, CACHE_TYPE_DATA);
|
||||
rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
|
||||
/* Separate instruction and data caches */
|
||||
leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
|
||||
} while (++level < CACHE_MAX_LEVEL);
|
||||
|
||||
this_cpu_ci->num_levels = level;
|
||||
this_cpu_ci->num_leaves = leaves;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int populate_cache_leaves(unsigned int cpu)
|
||||
{
|
||||
unsigned int level, idx, pvt;
|
||||
union cache_topology ct;
|
||||
enum cache_type ctype;
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
|
||||
|
||||
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
|
||||
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
|
||||
idx < this_cpu_ci->num_leaves; idx++, level++) {
|
||||
if (!this_leaf)
|
||||
return -EINVAL;
|
||||
|
||||
pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
|
||||
ctype = get_cache_type(&ct.ci[0], level);
|
||||
if (ctype == CACHE_TYPE_SEPARATE) {
|
||||
ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
|
||||
ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
|
||||
} else {
|
||||
rc = cache_add(level, private, ct.ci[level].type);
|
||||
ci_leaf_init(this_leaf++, pvt, ctype, level);
|
||||
}
|
||||
if (rc)
|
||||
goto error;
|
||||
}
|
||||
return;
|
||||
error:
|
||||
list_for_each_entry_safe(cache, next, &cache_list, list) {
|
||||
list_del(&cache->list);
|
||||
kfree(cache);
|
||||
}
|
||||
}
|
||||
|
||||
static struct cache_dir *cache_create_cache_dir(int cpu)
|
||||
{
|
||||
struct cache_dir *cache_dir;
|
||||
struct kobject *kobj = NULL;
|
||||
struct device *dev;
|
||||
|
||||
dev = get_cpu_device(cpu);
|
||||
if (!dev)
|
||||
goto out;
|
||||
kobj = kobject_create_and_add("cache", &dev->kobj);
|
||||
if (!kobj)
|
||||
goto out;
|
||||
cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
|
||||
if (!cache_dir)
|
||||
goto out;
|
||||
cache_dir->kobj = kobj;
|
||||
cache_dir_cpu[cpu] = cache_dir;
|
||||
return cache_dir;
|
||||
out:
|
||||
kobject_put(kobj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
|
||||
{
|
||||
return container_of(kobj, struct cache_index_dir, kobj);
|
||||
}
|
||||
|
||||
static void cache_index_release(struct kobject *kobj)
|
||||
{
|
||||
struct cache_index_dir *index;
|
||||
|
||||
index = kobj_to_cache_index_dir(kobj);
|
||||
kfree(index);
|
||||
}
|
||||
|
||||
static ssize_t cache_index_show(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
{
|
||||
struct kobj_attribute *kobj_attr;
|
||||
|
||||
kobj_attr = container_of(attr, struct kobj_attribute, attr);
|
||||
return kobj_attr->show(kobj, kobj_attr, buf);
|
||||
}
|
||||
|
||||
#define DEFINE_CACHE_ATTR(_name, _format, _value) \
|
||||
static ssize_t cache_##_name##_show(struct kobject *kobj, \
|
||||
struct kobj_attribute *attr, \
|
||||
char *buf) \
|
||||
{ \
|
||||
struct cache_index_dir *index; \
|
||||
\
|
||||
index = kobj_to_cache_index_dir(kobj); \
|
||||
return sprintf(buf, _format, _value); \
|
||||
} \
|
||||
static struct kobj_attribute cache_##_name##_attr = \
|
||||
__ATTR(_name, 0444, cache_##_name##_show, NULL);
|
||||
|
||||
DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
|
||||
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
|
||||
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
|
||||
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
|
||||
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
|
||||
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
|
||||
|
||||
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
|
||||
{
|
||||
struct cache_index_dir *index;
|
||||
int len;
|
||||
|
||||
index = kobj_to_cache_index_dir(kobj);
|
||||
len = type ?
|
||||
cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
|
||||
cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
|
||||
len += sprintf(&buf[len], "\n");
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t shared_cpu_map_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return shared_cpu_map_func(kobj, 0, buf);
|
||||
}
|
||||
static struct kobj_attribute cache_shared_cpu_map_attr =
|
||||
__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
|
||||
|
||||
static ssize_t shared_cpu_list_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return shared_cpu_map_func(kobj, 1, buf);
|
||||
}
|
||||
static struct kobj_attribute cache_shared_cpu_list_attr =
|
||||
__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
|
||||
|
||||
static struct attribute *cache_index_default_attrs[] = {
|
||||
&cache_type_attr.attr,
|
||||
&cache_size_attr.attr,
|
||||
&cache_number_of_sets_attr.attr,
|
||||
&cache_ways_of_associativity_attr.attr,
|
||||
&cache_level_attr.attr,
|
||||
&cache_coherency_line_size_attr.attr,
|
||||
&cache_shared_cpu_map_attr.attr,
|
||||
&cache_shared_cpu_list_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct sysfs_ops cache_index_ops = {
|
||||
.show = cache_index_show,
|
||||
};
|
||||
|
||||
static struct kobj_type cache_index_type = {
|
||||
.sysfs_ops = &cache_index_ops,
|
||||
.release = cache_index_release,
|
||||
.default_attrs = cache_index_default_attrs,
|
||||
};
|
||||
|
||||
static int cache_create_index_dir(struct cache_dir *cache_dir,
|
||||
struct cache *cache, int index, int cpu)
|
||||
{
|
||||
struct cache_index_dir *index_dir;
|
||||
int rc;
|
||||
|
||||
index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
|
||||
if (!index_dir)
|
||||
return -ENOMEM;
|
||||
index_dir->cache = cache;
|
||||
index_dir->cpu = cpu;
|
||||
rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
|
||||
cache_dir->kobj, "index%d", index);
|
||||
if (rc)
|
||||
goto out;
|
||||
index_dir->next = cache_dir->index;
|
||||
cache_dir->index = index_dir;
|
||||
return 0;
|
||||
out:
|
||||
kfree(index_dir);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int cache_add_cpu(int cpu)
|
||||
{
|
||||
struct cache_dir *cache_dir;
|
||||
struct cache *cache;
|
||||
int rc, index = 0;
|
||||
|
||||
if (list_empty(&cache_list))
|
||||
return 0;
|
||||
cache_dir = cache_create_cache_dir(cpu);
|
||||
if (!cache_dir)
|
||||
return -ENOMEM;
|
||||
list_for_each_entry(cache, &cache_list, list) {
|
||||
if (!cache->private)
|
||||
break;
|
||||
rc = cache_create_index_dir(cache_dir, cache, index, cpu);
|
||||
if (rc)
|
||||
return rc;
|
||||
index++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cache_remove_cpu(int cpu)
|
||||
{
|
||||
struct cache_index_dir *index, *next;
|
||||
struct cache_dir *cache_dir;
|
||||
|
||||
cache_dir = cache_dir_cpu[cpu];
|
||||
if (!cache_dir)
|
||||
return;
|
||||
index = cache_dir->index;
|
||||
while (index) {
|
||||
next = index->next;
|
||||
kobject_put(&index->kobj);
|
||||
index = next;
|
||||
}
|
||||
kobject_put(cache_dir->kobj);
|
||||
kfree(cache_dir);
|
||||
cache_dir_cpu[cpu] = NULL;
|
||||
}
|
||||
|
||||
static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
|
||||
void *hcpu)
|
||||
{
|
||||
int cpu = (long)hcpu;
|
||||
int rc = 0;
|
||||
|
||||
switch (action & ~CPU_TASKS_FROZEN) {
|
||||
case CPU_ONLINE:
|
||||
rc = cache_add_cpu(cpu);
|
||||
if (rc)
|
||||
cache_remove_cpu(cpu);
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
cache_remove_cpu(cpu);
|
||||
break;
|
||||
}
|
||||
return rc ? NOTIFY_BAD : NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int __init cache_init(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
if (!test_facility(34))
|
||||
return 0;
|
||||
cache_build_info();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu)
|
||||
cache_add_cpu(cpu);
|
||||
__hotcpu_notifier(cache_hotplug, 0);
|
||||
cpu_notifier_register_done();
|
||||
return 0;
|
||||
}
|
||||
device_initcall(cache_init);
|
||||
|
@@ -137,7 +137,7 @@ enum {
|
||||
INSTR_RSI_RRP,
|
||||
INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
|
||||
INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
|
||||
INSTR_RSY_RDRM,
|
||||
INSTR_RSY_RDRM, INSTR_RSY_RMRD,
|
||||
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
|
||||
INSTR_RS_RURD,
|
||||
INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
|
||||
@@ -226,7 +226,6 @@ static const struct s390_operand operands[] =
|
||||
[U16_32] = { 16, 32, 0 },
|
||||
[J16_16] = { 16, 16, OPERAND_PCREL },
|
||||
[J16_32] = { 16, 32, OPERAND_PCREL },
|
||||
[I16_32] = { 16, 32, OPERAND_SIGNED },
|
||||
[I24_24] = { 24, 24, OPERAND_SIGNED },
|
||||
[J32_16] = { 32, 16, OPERAND_PCREL },
|
||||
[I32_16] = { 32, 16, OPERAND_SIGNED },
|
||||
@@ -308,6 +307,7 @@ static const unsigned char formats[][7] = {
|
||||
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
|
||||
[INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
|
||||
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
|
||||
@@ -451,7 +451,8 @@ enum {
|
||||
LONG_INSN_VERLLV,
|
||||
LONG_INSN_VESRAV,
|
||||
LONG_INSN_VESRLV,
|
||||
LONG_INSN_VSBCBI
|
||||
LONG_INSN_VSBCBI,
|
||||
LONG_INSN_STCCTM
|
||||
};
|
||||
|
||||
static char *long_insn_name[] = {
|
||||
@@ -531,6 +532,7 @@ static char *long_insn_name[] = {
|
||||
[LONG_INSN_VESRAV] = "vesrav",
|
||||
[LONG_INSN_VESRLV] = "vesrlv",
|
||||
[LONG_INSN_VSBCBI] = "vsbcbi",
|
||||
[LONG_INSN_STCCTM] = "stcctm",
|
||||
};
|
||||
|
||||
static struct s390_insn opcode[] = {
|
||||
@@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = {
|
||||
{ "lric", 0x60, INSTR_RSY_RDRM },
|
||||
{ "stric", 0x61, INSTR_RSY_RDRM },
|
||||
{ "mric", 0x62, INSTR_RSY_RDRM },
|
||||
{ { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
|
||||
#endif
|
||||
{ "rll", 0x1d, INSTR_RSY_RRRD },
|
||||
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
|
||||
|
@@ -393,9 +393,27 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
if (test_facility(128))
S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
#endif
}

static int __init nocad_setup(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
return 0;
}
early_param("nocad", nocad_setup);

static int __init cad_init(void)
{
if (MACHINE_HAS_CAD)
/* Enable problem state CAD. */
__ctl_set_bit(2, 3);
return 0;
}
early_initcall(cad_init);

static __init void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
@@ -71,9 +71,11 @@ struct s390_mmap_arg_struct;
struct fadvise64_64_args;
struct old_sigaction;

long sys_rt_sigreturn(void);
long sys_sigreturn(void);

long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);

long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
#endif /* _ENTRY_H */
@@ -46,6 +46,13 @@
|
||||
* lg %r14,8(%r15) # offset 18
|
||||
* The jg instruction branches to offset 24 to skip as many instructions
|
||||
* as possible.
|
||||
* In case we use gcc's hotpatch feature the original and also the disabled
|
||||
* function prologue contains only a single six byte instruction and looks
|
||||
* like this:
|
||||
* > brcl 0,0 # offset 0
|
||||
* To enable ftrace the code gets patched like above and afterwards looks
|
||||
* like this:
|
||||
* > brasl %r0,ftrace_caller # offset 0
|
||||
*/
|
||||
|
||||
unsigned long ftrace_plt;
|
||||
@@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
|
||||
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
|
||||
unsigned long addr)
|
||||
{
|
||||
struct ftrace_insn insn;
|
||||
unsigned short op;
|
||||
void *from, *to;
|
||||
size_t size;
|
||||
struct ftrace_insn orig, new, old;
|
||||
|
||||
ftrace_generate_nop_insn(&insn);
|
||||
size = sizeof(insn);
|
||||
from = &insn;
|
||||
to = (void *) rec->ip;
|
||||
if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
|
||||
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
|
||||
return -EFAULT;
|
||||
/*
|
||||
* If we find a breakpoint instruction, a kprobe has been placed
|
||||
* at the beginning of the function. We write the constant
|
||||
* KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
|
||||
* instruction so that the kprobes handler can execute a nop, if it
|
||||
* reaches this breakpoint.
|
||||
*/
|
||||
if (op == BREAKPOINT_INSTRUCTION) {
|
||||
size -= 2;
|
||||
from += 2;
|
||||
to += 2;
|
||||
insn.disp = KPROBE_ON_FTRACE_NOP;
|
||||
if (addr == MCOUNT_ADDR) {
|
||||
/* Initial code replacement */
|
||||
#ifdef CC_USING_HOTPATCH
|
||||
/* We expect to see brcl 0,0 */
|
||||
ftrace_generate_nop_insn(&orig);
|
||||
#else
|
||||
/* We expect to see stg r14,8(r15) */
|
||||
orig.opc = 0xe3e0;
|
||||
orig.disp = 0xf0080024;
|
||||
#endif
|
||||
ftrace_generate_nop_insn(&new);
|
||||
} else if (old.opc == BREAKPOINT_INSTRUCTION) {
|
||||
/*
|
||||
* If we find a breakpoint instruction, a kprobe has been
|
||||
* placed at the beginning of the function. We write the
|
||||
* constant KPROBE_ON_FTRACE_NOP into the remaining four
|
||||
* bytes of the original instruction so that the kprobes
|
||||
* handler can execute a nop, if it reaches this breakpoint.
|
||||
*/
|
||||
new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
|
||||
orig.disp = KPROBE_ON_FTRACE_CALL;
|
||||
new.disp = KPROBE_ON_FTRACE_NOP;
|
||||
} else {
|
||||
/* Replace ftrace call with a nop. */
|
||||
ftrace_generate_call_insn(&orig, rec->ip);
|
||||
ftrace_generate_nop_insn(&new);
|
||||
}
|
||||
if (probe_kernel_write(to, from, size))
|
||||
/* Verify that the to be replaced code matches what we expect. */
|
||||
if (memcmp(&orig, &old, sizeof(old)))
|
||||
return -EINVAL;
|
||||
if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
|
||||
return -EPERM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
{
|
||||
struct ftrace_insn insn;
|
||||
unsigned short op;
|
||||
void *from, *to;
|
||||
size_t size;
|
||||
struct ftrace_insn orig, new, old;
|
||||
|
||||
ftrace_generate_call_insn(&insn, rec->ip);
|
||||
size = sizeof(insn);
|
||||
from = &insn;
|
||||
to = (void *) rec->ip;
|
||||
if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
|
||||
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
|
||||
return -EFAULT;
|
||||
/*
|
||||
* If we find a breakpoint instruction, a kprobe has been placed
|
||||
* at the beginning of the function. We write the constant
|
||||
* KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
|
||||
* instruction so that the kprobes handler can execute a brasl if it
|
||||
* reaches this breakpoint.
|
||||
*/
|
||||
if (op == BREAKPOINT_INSTRUCTION) {
|
||||
size -= 2;
|
||||
from += 2;
|
||||
to += 2;
|
||||
insn.disp = KPROBE_ON_FTRACE_CALL;
|
||||
if (old.opc == BREAKPOINT_INSTRUCTION) {
|
||||
/*
|
||||
* If we find a breakpoint instruction, a kprobe has been
|
||||
* placed at the beginning of the function. We write the
|
||||
* constant KPROBE_ON_FTRACE_CALL into the remaining four
|
||||
* bytes of the original instruction so that the kprobes
|
||||
* handler can execute a brasl if it reaches this breakpoint.
|
||||
*/
|
||||
new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
|
||||
orig.disp = KPROBE_ON_FTRACE_NOP;
|
||||
new.disp = KPROBE_ON_FTRACE_CALL;
|
||||
} else {
|
||||
/* Replace nop with an ftrace call. */
|
||||
ftrace_generate_nop_insn(&orig);
|
||||
ftrace_generate_call_insn(&new, rec->ip);
|
||||
}
|
||||
if (probe_kernel_write(to, from, size))
|
||||
/* Verify that the to be replaced code matches what we expect. */
|
||||
if (memcmp(&orig, &old, sizeof(old)))
|
||||
return -EINVAL;
|
||||
if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
|
||||
return -EPERM;
|
||||
return 0;
|
||||
}
|
||||
|
@@ -436,7 +436,9 @@ ENTRY(startup_kdump)
# followed by the facility words.

#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_ZEC12)
#if defined(CONFIG_MARCH_Z13)
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_ZEC12)
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100eff2, 0xf46c0000
@@ -2074,7 +2074,8 @@ static void do_reset_calls(void)
|
||||
|
||||
u32 dump_prefix_page;
|
||||
|
||||
void s390_reset_system(void (*func)(void *), void *data)
|
||||
void s390_reset_system(void (*fn_pre)(void),
|
||||
void (*fn_post)(void *), void *data)
|
||||
{
|
||||
struct _lowcore *lc;
|
||||
|
||||
@@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data)
|
||||
/* Store status at absolute zero */
|
||||
store_status();
|
||||
|
||||
/* Call function before reset */
|
||||
if (fn_pre)
|
||||
fn_pre();
|
||||
do_reset_calls();
|
||||
if (func)
|
||||
func(data);
|
||||
/* Call function after reset */
|
||||
if (fn_post)
|
||||
fn_post(data);
|
||||
}
|
||||
|
@@ -22,31 +22,66 @@ struct insn_args {
|
||||
enum jump_label_type type;
|
||||
};
|
||||
|
||||
static void __jump_label_transform(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
|
||||
{
|
||||
struct insn insn;
|
||||
int rc;
|
||||
/* brcl 0,0 */
|
||||
insn->opcode = 0xc004;
|
||||
insn->offset = 0;
|
||||
}
|
||||
|
||||
static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
|
||||
{
|
||||
/* brcl 15,offset */
|
||||
insn->opcode = 0xc0f4;
|
||||
insn->offset = (entry->target - entry->code) >> 1;
|
||||
}
|
||||
|
||||
static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
|
||||
{
|
||||
unsigned char *ipc = (unsigned char *)entry->code;
|
||||
unsigned char *ipe = (unsigned char *)insn;
|
||||
|
||||
pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
|
||||
pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
|
||||
ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
|
||||
pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
|
||||
ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
|
||||
panic("Corrupted kernel text");
|
||||
}
|
||||
|
||||
static struct insn orignop = {
|
||||
.opcode = 0xc004,
|
||||
.offset = JUMP_LABEL_NOP_OFFSET >> 1,
|
||||
};
|
||||
|
||||
static void __jump_label_transform(struct jump_entry *entry,
|
||||
enum jump_label_type type,
|
||||
int init)
|
||||
{
|
||||
struct insn old, new;
|
||||
|
||||
if (type == JUMP_LABEL_ENABLE) {
|
||||
/* brcl 15,offset */
|
||||
insn.opcode = 0xc0f4;
|
||||
insn.offset = (entry->target - entry->code) >> 1;
|
||||
jump_label_make_nop(entry, &old);
|
||||
jump_label_make_branch(entry, &new);
|
||||
} else {
|
||||
/* brcl 0,0 */
|
||||
insn.opcode = 0xc004;
|
||||
insn.offset = 0;
|
||||
jump_label_make_branch(entry, &old);
|
||||
jump_label_make_nop(entry, &new);
|
||||
}
|
||||
|
||||
rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
|
||||
WARN_ON_ONCE(rc < 0);
|
||||
if (init) {
|
||||
if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
|
||||
jump_label_bug(entry, &old);
|
||||
} else {
|
||||
if (memcmp((void *)entry->code, &old, sizeof(old)))
|
||||
jump_label_bug(entry, &old);
|
||||
}
|
||||
probe_kernel_write((void *)entry->code, &new, sizeof(new));
|
||||
}
|
||||
|
||||
static int __sm_arch_jump_label_transform(void *data)
|
||||
{
|
||||
struct insn_args *args = data;
|
||||
|
||||
__jump_label_transform(args->entry, args->type);
|
||||
__jump_label_transform(args->entry, args->type, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -64,7 +99,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
|
||||
void arch_jump_label_transform_static(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
{
|
||||
__jump_label_transform(entry, type);
|
||||
__jump_label_transform(entry, type, 1);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p)
/*
* If kprobes patches the instruction that is morphed by
* ftrace make sure that kprobes always sees the branch
* "jg .+24" that skips the mcount block
* "jg .+24" that skips the mcount block or the "brcl 0,0"
* in case of hotpatch.
*/
ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
p->ainsn.is_ftrace_insn = 1;
@@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void)
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(machine_kdump_pm_init);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Start kdump: We expect here that a store status has been done on our CPU
|
||||
*/
|
||||
static void __do_machine_kdump(void *image)
|
||||
{
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
|
||||
|
||||
setup_regs();
|
||||
__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
|
||||
start_kdump(1);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Check if kdump checksums are valid: We call purgatory with parameter "0"
|
||||
@@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data)
|
||||
*/
|
||||
static void __machine_kexec(void *data)
|
||||
{
|
||||
struct kimage *image = data;
|
||||
|
||||
__arch_local_irq_stosm(0x04); /* enable DAT */
|
||||
pfault_fini();
|
||||
tracing_off();
|
||||
debug_locks_off();
|
||||
if (image->type == KEXEC_TYPE_CRASH) {
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
|
||||
|
||||
lgr_info_log();
|
||||
s390_reset_system(__do_machine_kdump, data);
|
||||
} else {
|
||||
s390_reset_system(__do_machine_kexec, data);
|
||||
}
|
||||
s390_reset_system(setup_regs, __do_machine_kdump, data);
|
||||
} else
|
||||
#endif
|
||||
s390_reset_system(NULL, __do_machine_kexec, data);
|
||||
disabled_wait((unsigned long) __builtin_return_address(0));
|
||||
}
|
||||
|
||||
|
@@ -27,7 +27,9 @@ ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
#ifndef CC_USING_HOTPATCH
aghi %r0,MCOUNT_RETURN_FIXUP
#endif
aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
@@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task)
{
}

#ifdef CONFIG_64BIT
void arch_release_task_struct(struct task_struct *tsk)
{
if (tsk->thread.vxrs)
kfree(tsk->thread.vxrs);
}
#endif

int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
{
@@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}

unsigned long randomize_et_dyn(unsigned long base)
{
unsigned long ret;

if (!(current->flags & PF_RANDOMIZE))
return base;
ret = PAGE_ALIGN(base + brk_rnd());
return (ret > base) ? ret : base;
}
@@ -8,16 +8,24 @@

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/smp.h>

static DEFINE_PER_CPU(struct cpuid, cpu_id);

void cpu_relax(void)
{
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,0x44");
barrier();
}
EXPORT_SYMBOL(cpu_relax);

/*
* cpu_init - initializes state that is per-CPU.
*/
@@ -294,7 +294,8 @@ ENTRY(_sclp_print_early)
#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa3
lmh %r6,%r15,96(%r15) # store upper register halves
lgfr %r2,%r2 # sign extend return value
lmh %r6,%r15,96(%r15) # restore upper register halves
ahi %r15,80
.Lesa3:
#endif
@@ -810,6 +810,9 @@ static void __init setup_hwcaps(void)
case 0x2828:
strcpy(elf_platform, "zEC12");
break;
case 0x2964:
strcpy(elf_platform, "z13");
break;
}
}
@@ -71,9 +71,30 @@ struct pcpu {
|
||||
};
|
||||
|
||||
static u8 boot_cpu_type;
|
||||
static u16 boot_cpu_address;
|
||||
static struct pcpu pcpu_devices[NR_CPUS];
|
||||
|
||||
unsigned int smp_cpu_mt_shift;
|
||||
EXPORT_SYMBOL(smp_cpu_mt_shift);
|
||||
|
||||
unsigned int smp_cpu_mtid;
|
||||
EXPORT_SYMBOL(smp_cpu_mtid);
|
||||
|
||||
static unsigned int smp_max_threads __initdata = -1U;
|
||||
|
||||
static int __init early_nosmt(char *s)
|
||||
{
|
||||
smp_max_threads = 1;
|
||||
return 0;
|
||||
}
|
||||
early_param("nosmt", early_nosmt);
|
||||
|
||||
static int __init early_smt(char *s)
|
||||
{
|
||||
get_option(&s, &smp_max_threads);
|
||||
return 0;
|
||||
}
|
||||
early_param("smt", early_smt);
|
||||
|
||||
/*
|
||||
* The smp_cpu_state_mutex must be held when changing the state or polarization
|
||||
* member of a pcpu data structure within the pcpu_devices arreay.
|
||||
@@ -132,7 +153,7 @@ static inline int pcpu_running(struct pcpu *pcpu)
|
||||
/*
|
||||
* Find struct pcpu by cpu address.
|
||||
*/
|
||||
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
|
||||
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
@@ -298,6 +319,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
|
||||
for (;;) ;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable additional logical cpus for multi-threading.
|
||||
*/
|
||||
static int pcpu_set_smt(unsigned int mtid)
|
||||
{
|
||||
register unsigned long reg1 asm ("1") = (unsigned long) mtid;
|
||||
int cc;
|
||||
|
||||
if (smp_cpu_mtid == mtid)
|
||||
return 0;
|
||||
asm volatile(
|
||||
" sigp %1,0,%2 # sigp set multi-threading\n"
|
||||
" ipm %0\n"
|
||||
" srl %0,28\n"
|
||||
: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
|
||||
: "cc");
|
||||
if (cc == 0) {
|
||||
smp_cpu_mtid = mtid;
|
||||
smp_cpu_mt_shift = 0;
|
||||
while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
|
||||
smp_cpu_mt_shift++;
|
||||
pcpu_devices[0].address = stap();
|
||||
}
|
||||
return cc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Call function on an online CPU.
|
||||
*/
|
||||
@@ -512,22 +559,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
|
||||
static void __init smp_get_save_area(int cpu, u16 address)
|
||||
static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
|
||||
{
|
||||
void *lc = pcpu_devices[0].lowcore;
|
||||
struct save_area_ext *sa_ext;
|
||||
unsigned long vx_sa;
|
||||
|
||||
if (is_kdump_kernel())
|
||||
return;
|
||||
if (!OLDMEM_BASE && (address == boot_cpu_address ||
|
||||
ipl_info.type != IPL_TYPE_FCP_DUMP))
|
||||
return;
|
||||
sa_ext = dump_save_area_create(cpu);
|
||||
if (!sa_ext)
|
||||
panic("could not allocate memory for save area\n");
|
||||
if (address == boot_cpu_address) {
|
||||
/* Copy the registers of the boot cpu. */
|
||||
if (is_boot_cpu) {
|
||||
/* Copy the registers of the boot CPU. */
|
||||
copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
|
||||
SAVE_AREA_BASE - PAGE_SIZE, 0);
|
||||
if (MACHINE_HAS_VX)
|
||||
@@ -548,6 +590,64 @@ static void __init smp_get_save_area(int cpu, u16 address)
|
||||
free_page(vx_sa);
|
||||
}
|
||||
|
||||
/*
|
||||
* Collect CPU state of the previous, crashed system.
|
||||
* There are four cases:
|
||||
* 1) standard zfcp dump
|
||||
* condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
|
||||
* The state for all CPUs except the boot CPU needs to be collected
|
||||
* with sigp stop-and-store-status. The boot CPU state is located in
|
||||
* the absolute lowcore of the memory stored in the HSA. The zcore code
|
||||
* will allocate the save area and copy the boot CPU state from the HSA.
|
||||
* 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
|
||||
* condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
|
||||
* The state for all CPUs except the boot CPU needs to be collected
|
||||
* with sigp stop-and-store-status. The firmware or the boot-loader
|
||||
* stored the registers of the boot CPU in the absolute lowcore in the
|
||||
* memory of the old system.
|
||||
* 3) kdump and the old kernel did not store the CPU state,
|
||||
* or stand-alone kdump for DASD
|
||||
* condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
|
||||
* The state for all CPUs except the boot CPU needs to be collected
|
||||
* with sigp stop-and-store-status. The kexec code or the boot-loader
|
||||
* stored the registers of the boot CPU in the memory of the old system.
|
||||
* 4) kdump and the old kernel stored the CPU state
|
||||
* condition: OLDMEM_BASE != NULL && is_kdump_kernel()
|
||||
* The state of all CPUs is stored in ELF sections in the memory of the
|
||||
* old system. The ELF sections are picked up by the crash_dump code
|
||||
* via elfcorehdr_addr.
|
||||
*/
|
||||
static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
|
||||
{
|
||||
unsigned int cpu, address, i, j;
|
||||
int is_boot_cpu;
|
||||
|
||||
if (is_kdump_kernel())
|
||||
/* Previous system stored the CPU states. Nothing to do. */
|
||||
return;
|
||||
if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
|
||||
/* No previous system present, normal boot. */
|
||||
return;
|
||||
/* Set multi-threading state to the previous system. */
|
||||
pcpu_set_smt(sclp_get_mtid_prev());
|
||||
/* Collect CPU states. */
|
||||
cpu = 0;
|
||||
for (i = 0; i < info->configured; i++) {
|
||||
/* Skip CPUs with different CPU type. */
|
||||
if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
|
||||
continue;
|
||||
for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
|
||||
address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
|
||||
is_boot_cpu = (address == pcpu_devices[0].address);
|
||||
if (is_boot_cpu && !OLDMEM_BASE)
|
||||
/* Skip boot CPU for standard zfcp dump. */
|
||||
continue;
|
||||
/* Get state for this CPu. */
|
||||
__smp_store_cpu_state(cpu, address, is_boot_cpu);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int smp_store_status(int cpu)
|
||||
{
|
||||
unsigned long vx_sa;
|
||||
@@ -565,10 +665,6 @@ int smp_store_status(int cpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else /* CONFIG_CRASH_DUMP */
|
||||
|
||||
static inline void smp_get_save_area(int cpu, u16 address) { }
|
||||
|
||||
#endif /* CONFIG_CRASH_DUMP */
|
||||
|
||||
void smp_cpu_set_polarization(int cpu, int val)
|
||||
@@ -590,11 +686,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
|
||||
info = kzalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
|
||||
use_sigp_detection = 1;
|
||||
for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
|
||||
for (address = 0; address <= MAX_CPU_ADDRESS;
|
||||
address += (1U << smp_cpu_mt_shift)) {
|
||||
if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
|
||||
SIGP_CC_NOT_OPERATIONAL)
|
||||
continue;
|
||||
info->cpu[info->configured].address = address;
|
||||
info->cpu[info->configured].core_id =
|
||||
address >> smp_cpu_mt_shift;
|
||||
info->configured++;
|
||||
}
|
||||
info->combined = info->configured;
|
||||
@@ -608,7 +706,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
|
||||
{
|
||||
struct pcpu *pcpu;
|
||||
cpumask_t avail;
|
||||
int cpu, nr, i;
|
||||
int cpu, nr, i, j;
|
||||
u16 address;
|
||||
|
||||
nr = 0;
|
||||
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
|
||||
@@ -616,51 +715,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
|
||||
for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
|
||||
if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
|
||||
continue;
|
||||
if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
|
||||
continue;
|
||||
pcpu = pcpu_devices + cpu;
|
||||
pcpu->address = info->cpu[i].address;
|
||||
pcpu->state = (i >= info->configured) ?
|
||||
CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
|
||||
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
|
||||
set_cpu_present(cpu, true);
|
||||
if (sysfs_add && smp_add_present_cpu(cpu) != 0)
|
||||
set_cpu_present(cpu, false);
|
||||
else
|
||||
nr++;
|
||||
cpu = cpumask_next(cpu, &avail);
|
||||
address = info->cpu[i].core_id << smp_cpu_mt_shift;
|
||||
for (j = 0; j <= smp_cpu_mtid; j++) {
|
||||
if (pcpu_find_address(cpu_present_mask, address + j))
|
||||
continue;
|
||||
pcpu = pcpu_devices + cpu;
|
||||
pcpu->address = address + j;
|
||||
pcpu->state =
|
||||
(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
|
||||
CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
|
||||
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
|
||||
set_cpu_present(cpu, true);
|
||||
if (sysfs_add && smp_add_present_cpu(cpu) != 0)
|
||||
set_cpu_present(cpu, false);
|
||||
else
|
||||
nr++;
|
||||
cpu = cpumask_next(cpu, &avail);
|
||||
if (cpu >= nr_cpu_ids)
|
||||
break;
|
||||
}
|
||||
}
|
||||
return nr;
|
||||
}
|
||||
|
||||
static void __init smp_detect_cpus(void)
|
||||
{
|
||||
unsigned int cpu, c_cpus, s_cpus;
|
||||
unsigned int cpu, mtid, c_cpus, s_cpus;
|
||||
struct sclp_cpu_info *info;
|
||||
u16 address;
|
||||
|
||||
/* Get CPU information */
|
||||
info = smp_get_cpu_info();
|
||||
if (!info)
|
||||
panic("smp_detect_cpus failed to allocate memory\n");
|
||||
|
||||
/* Find boot CPU type */
|
||||
if (info->has_cpu_type) {
|
||||
for (cpu = 0; cpu < info->combined; cpu++) {
|
||||
if (info->cpu[cpu].address != boot_cpu_address)
|
||||
continue;
|
||||
/* The boot cpu dictates the cpu type. */
|
||||
boot_cpu_type = info->cpu[cpu].type;
|
||||
break;
|
||||
}
|
||||
address = stap();
|
||||
for (cpu = 0; cpu < info->combined; cpu++)
|
||||
if (info->cpu[cpu].core_id == address) {
|
||||
/* The boot cpu dictates the cpu type. */
|
||||
boot_cpu_type = info->cpu[cpu].type;
|
||||
break;
|
||||
}
|
||||
if (cpu >= info->combined)
|
||||
panic("Could not find boot CPU type");
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
/* Collect CPU state of previous system */
|
||||
smp_store_cpu_states(info);
|
||||
#endif
|
||||
|
||||
/* Set multi-threading state for the current system */
|
||||
mtid = sclp_get_mtid(boot_cpu_type);
|
||||
mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
|
||||
pcpu_set_smt(mtid);
|
||||
|
||||
/* Print number of CPUs */
|
||||
c_cpus = s_cpus = 0;
|
||||
for (cpu = 0; cpu < info->combined; cpu++) {
|
||||
if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
|
||||
continue;
|
||||
if (cpu < info->configured) {
|
||||
smp_get_save_area(c_cpus, info->cpu[cpu].address);
|
||||
c_cpus++;
|
||||
} else
|
||||
s_cpus++;
|
||||
if (cpu < info->configured)
|
||||
c_cpus += smp_cpu_mtid + 1;
|
||||
else
|
||||
s_cpus += smp_cpu_mtid + 1;
|
||||
}
|
||||
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
|
||||
|
||||
/* Add CPUs present at boot */
|
||||
get_online_cpus();
|
||||
__smp_rescan_cpus(info, 0);
|
||||
put_online_cpus();
|
||||
@@ -696,12 +820,23 @@ static void smp_start_secondary(void *cpuvoid)
|
||||
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
||||
{
|
||||
struct pcpu *pcpu;
|
||||
int rc;
|
||||
int base, i, rc;
|
||||
|
||||
pcpu = pcpu_devices + cpu;
|
||||
if (pcpu->state != CPU_STATE_CONFIGURED)
|
||||
return -EIO;
|
||||
if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
|
||||
base = cpu - (cpu % (smp_cpu_mtid + 1));
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
if (base + i < nr_cpu_ids)
|
||||
if (cpu_online(base + i))
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* If this is the first CPU of the core to get online
|
||||
* do an initial CPU reset.
|
||||
*/
|
||||
if (i > smp_cpu_mtid &&
|
||||
pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
|
||||
SIGP_CC_ORDER_CODE_ACCEPTED)
|
||||
return -EIO;
|
||||
|
||||
@@ -774,7 +909,8 @@ void __init smp_fill_possible_mask(void)
|
||||
{
|
||||
unsigned int possible, sclp, cpu;
|
||||
|
||||
sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
|
||||
sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
|
||||
sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
|
||||
possible = setup_possible_cpus ?: nr_cpu_ids;
|
||||
possible = min(possible, sclp);
|
||||
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
|
||||
@@ -796,9 +932,8 @@ void __init smp_prepare_boot_cpu(void)
|
||||
{
|
||||
struct pcpu *pcpu = pcpu_devices;
|
||||
|
||||
boot_cpu_address = stap();
|
||||
pcpu->state = CPU_STATE_CONFIGURED;
|
||||
pcpu->address = boot_cpu_address;
|
||||
pcpu->address = stap();
|
||||
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
|
||||
pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
|
||||
+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
|
||||
@@ -848,7 +983,7 @@ static ssize_t cpu_configure_store(struct device *dev,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct pcpu *pcpu;
|
||||
int cpu, val, rc;
|
||||
int cpu, val, rc, i;
|
||||
char delim;
|
||||
|
||||
if (sscanf(buf, "%d %c", &val, &delim) != 1)
|
||||
@@ -860,29 +995,43 @@ static ssize_t cpu_configure_store(struct device *dev,
|
||||
rc = -EBUSY;
|
||||
/* disallow configuration changes of online cpus and cpu 0 */
|
||||
cpu = dev->id;
|
||||
if (cpu_online(cpu) || cpu == 0)
|
||||
cpu -= cpu % (smp_cpu_mtid + 1);
|
||||
if (cpu == 0)
|
||||
goto out;
|
||||
for (i = 0; i <= smp_cpu_mtid; i++)
|
||||
if (cpu_online(cpu + i))
|
||||
goto out;
|
||||
pcpu = pcpu_devices + cpu;
|
||||
rc = 0;
|
||||
switch (val) {
|
||||
case 0:
|
||||
if (pcpu->state != CPU_STATE_CONFIGURED)
|
||||
break;
|
||||
rc = sclp_cpu_deconfigure(pcpu->address);
|
||||
rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
|
||||
if (rc)
|
||||
break;
|
||||
pcpu->state = CPU_STATE_STANDBY;
|
||||
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
|
||||
continue;
|
||||
pcpu[i].state = CPU_STATE_STANDBY;
|
||||
smp_cpu_set_polarization(cpu + i,
|
||||
POLARIZATION_UNKNOWN);
|
||||
}
|
||||
topology_expect_change();
|
||||
break;
|
||||
case 1:
|
||||
if (pcpu->state != CPU_STATE_STANDBY)
|
||||
break;
|
||||
rc = sclp_cpu_configure(pcpu->address);
|
||||
rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
|
||||
if (rc)
|
||||
break;
|
||||
pcpu->state = CPU_STATE_CONFIGURED;
|
||||
smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
|
||||
continue;
|
||||
pcpu[i].state = CPU_STATE_CONFIGURED;
|
||||
smp_cpu_set_polarization(cpu + i,
|
||||
POLARIZATION_UNKNOWN);
|
||||
}
|
||||
topology_expect_change();
|
||||
break;
|
||||
default:
|
||||
|
@@ -194,6 +194,14 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
if (info->mt_installed & 0x80) {
seq_printf(m, "LPAR CPUs G-MTID: %d\n",
info->mt_general & 0x1f);
seq_printf(m, "LPAR CPUs S-MTID: %d\n",
info->mt_installed & 0x1f);
seq_printf(m, "LPAR CPUs PS-MTID: %d\n",
info->mt_psmtid & 0x1f);
}
}

static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
@@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
|
||||
return mask;
|
||||
}
|
||||
|
||||
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
|
||||
static cpumask_t cpu_thread_map(unsigned int cpu)
|
||||
{
|
||||
cpumask_t mask;
|
||||
int i;
|
||||
|
||||
cpumask_copy(&mask, cpumask_of(cpu));
|
||||
if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
|
||||
return mask;
|
||||
cpu -= cpu % (smp_cpu_mtid + 1);
|
||||
for (i = 0; i <= smp_cpu_mtid; i++)
|
||||
if (cpu_present(cpu + i))
|
||||
cpumask_set_cpu(cpu + i, &mask);
|
||||
return mask;
|
||||
}
|
||||
|
||||
static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
|
||||
struct mask_info *book,
|
||||
struct mask_info *socket,
|
||||
int one_socket_per_cpu)
|
||||
{
|
||||
unsigned int cpu;
|
||||
unsigned int core;
|
||||
|
||||
for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
|
||||
unsigned int rcpu;
|
||||
int lcpu;
|
||||
for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
|
||||
unsigned int rcore;
|
||||
int lcpu, i;
|
||||
|
||||
rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
|
||||
lcpu = smp_find_processor_id(rcpu);
|
||||
rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
|
||||
lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
|
||||
if (lcpu < 0)
|
||||
continue;
|
||||
cpumask_set_cpu(lcpu, &book->mask);
|
||||
cpu_topology[lcpu].book_id = book->id;
|
||||
cpumask_set_cpu(lcpu, &socket->mask);
|
||||
cpu_topology[lcpu].core_id = rcpu;
|
||||
if (one_socket_per_cpu) {
|
||||
cpu_topology[lcpu].socket_id = rcpu;
|
||||
socket = socket->next;
|
||||
} else {
|
||||
cpu_topology[lcpu].socket_id = socket->id;
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
cpu_topology[lcpu + i].book_id = book->id;
|
||||
cpu_topology[lcpu + i].core_id = rcore;
|
||||
cpu_topology[lcpu + i].thread_id = lcpu + i;
|
||||
cpumask_set_cpu(lcpu + i, &book->mask);
|
||||
cpumask_set_cpu(lcpu + i, &socket->mask);
|
||||
if (one_socket_per_cpu)
|
||||
cpu_topology[lcpu + i].socket_id = rcore;
|
||||
else
|
||||
cpu_topology[lcpu + i].socket_id = socket->id;
|
||||
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
|
||||
}
|
||||
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
|
||||
if (one_socket_per_cpu)
|
||||
socket = socket->next;
|
||||
}
|
||||
return socket;
|
||||
}
|
||||
@@ -108,7 +126,7 @@ static void clear_masks(void)
|
||||
static union topology_entry *next_tle(union topology_entry *tle)
|
||||
{
|
||||
if (!tle->nl)
|
||||
return (union topology_entry *)((struct topology_cpu *)tle + 1);
|
||||
return (union topology_entry *)((struct topology_core *)tle + 1);
|
||||
return (union topology_entry *)((struct topology_container *)tle + 1);
|
||||
}
|
||||
|
||||
@@ -231,9 +249,11 @@ static void update_cpu_masks(void)
|
||||
|
||||
spin_lock_irqsave(&topology_lock, flags);
|
||||
for_each_possible_cpu(cpu) {
|
||||
cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
|
||||
cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
|
||||
cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
|
||||
if (!MACHINE_HAS_TOPOLOGY) {
|
||||
cpu_topology[cpu].thread_id = cpu;
|
||||
cpu_topology[cpu].core_id = cpu;
|
||||
cpu_topology[cpu].socket_id = cpu;
|
||||
cpu_topology[cpu].book_id = cpu;
|
||||
@@ -445,6 +465,12 @@ int topology_cpu_init(struct cpu *cpu)
|
||||
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
|
||||
}
|
||||
|
||||
const struct cpumask *cpu_thread_mask(int cpu)
|
||||
{
|
||||
return &cpu_topology[cpu].thread_mask;
|
||||
}
|
||||
|
||||
|
||||
const struct cpumask *cpu_coregroup_mask(int cpu)
|
||||
{
|
||||
return &cpu_topology[cpu].core_mask;
|
||||
@@ -456,6 +482,7 @@ static const struct cpumask *cpu_book_mask(int cpu)
|
||||
}
|
||||
|
||||
static struct sched_domain_topology_level s390_topology[] = {
|
||||
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
|
||||
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
|
||||
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
|
||||
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
|
||||
|
@@ -15,6 +15,8 @@
|
||||
#include <asm/cputime.h>
|
||||
#include <asm/vtimer.h>
|
||||
#include <asm/vtime.h>
|
||||
#include <asm/cpu_mf.h>
|
||||
#include <asm/smp.h>
|
||||
|
||||
static void virt_timer_expire(void);
|
||||
|
||||
@@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
|
||||
static atomic64_t virt_timer_current;
|
||||
static atomic64_t virt_timer_elapsed;
|
||||
|
||||
static DEFINE_PER_CPU(u64, mt_cycles[32]);
|
||||
static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
|
||||
static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
|
||||
|
||||
static inline u64 get_vtimer(void)
|
||||
{
|
||||
u64 timer;
|
||||
@@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
|
||||
{
|
||||
struct thread_info *ti = task_thread_info(tsk);
|
||||
u64 timer, clock, user, system, steal;
|
||||
u64 user_scaled, system_scaled;
|
||||
int i;
|
||||
|
||||
timer = S390_lowcore.last_update_timer;
|
||||
clock = S390_lowcore.last_update_clock;
|
||||
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
|
||||
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
|
||||
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
|
||||
|
||||
/* Do MT utilization calculation */
|
||||
if (smp_cpu_mtid) {
|
||||
u64 cycles_new[32], *cycles_old;
|
||||
u64 delta, mult, div;
|
||||
|
||||
cycles_old = this_cpu_ptr(mt_cycles);
|
||||
if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
|
||||
mult = div = 0;
|
||||
for (i = 0; i <= smp_cpu_mtid; i++) {
|
||||
delta = cycles_new[i] - cycles_old[i];
|
||||
mult += delta;
|
||||
div += (i + 1) * delta;
|
||||
}
|
||||
if (mult > 0) {
|
||||
/* Update scaling factor */
|
||||
__this_cpu_write(mt_scaling_mult, mult);
|
||||
__this_cpu_write(mt_scaling_div, div);
|
||||
memcpy(cycles_old, cycles_new,
|
||||
sizeof(u64) * (smp_cpu_mtid + 1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
user = S390_lowcore.user_timer - ti->user_timer;
|
||||
S390_lowcore.steal_timer -= user;
|
||||
ti->user_timer = S390_lowcore.user_timer;
|
||||
account_user_time(tsk, user, user);
|
||||
|
||||
system = S390_lowcore.system_timer - ti->system_timer;
|
||||
S390_lowcore.steal_timer -= system;
|
||||
ti->system_timer = S390_lowcore.system_timer;
|
||||
account_system_time(tsk, hardirq_offset, system, system);
|
||||
|
||||
user_scaled = user;
|
||||
system_scaled = system;
|
||||
/* Do MT utilization scaling */
|
||||
if (smp_cpu_mtid) {
|
||||
u64 mult = __this_cpu_read(mt_scaling_mult);
|
||||
u64 div = __this_cpu_read(mt_scaling_div);
|
||||
|
||||
user_scaled = (user_scaled * mult) / div;
|
||||
system_scaled = (system_scaled * mult) / div;
|
||||
}
|
||||
account_user_time(tsk, user, user_scaled);
|
||||
account_system_time(tsk, hardirq_offset, system, system_scaled);
|
||||
|
||||
steal = S390_lowcore.steal_timer;
|
||||
if ((s64) steal > 0) {
|
||||
@@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
|
||||
void vtime_account_irq_enter(struct task_struct *tsk)
|
||||
{
|
||||
struct thread_info *ti = task_thread_info(tsk);
|
||||
u64 timer, system;
|
||||
u64 timer, system, system_scaled;
|
||||
|
||||
timer = S390_lowcore.last_update_timer;
|
||||
S390_lowcore.last_update_timer = get_vtimer();
|
||||
@@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
|
||||
system = S390_lowcore.system_timer - ti->system_timer;
|
||||
S390_lowcore.steal_timer -= system;
|
||||
ti->system_timer = S390_lowcore.system_timer;
|
||||
account_system_time(tsk, 0, system, system);
|
||||
system_scaled = system;
|
||||
/* Do MT utilization scaling */
|
||||
if (smp_cpu_mtid) {
|
||||
u64 mult = __this_cpu_read(mt_scaling_mult);
|
||||
u64 div = __this_cpu_read(mt_scaling_div);
|
||||
|
||||
system_scaled = (system_scaled * mult) / div;
|
||||
}
|
||||
account_system_time(tsk, 0, system, system_scaled);
|
||||
|
||||
virt_timer_forward(system);
|
||||
}