Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The main bulk of the s390 patches for the 4.10 merge window:

   - Add support for the contiguous memory allocator.

   - The recovery for I/O errors in the dasd device driver is improved,
     the driver will now remove channel paths that are not working
     properly.

   - Additional fields are added to /proc/sysinfo, the extended
     partition name and the partition UUID.

   - New naming for PCI devices with system defined UIDs.

   - The last few remaining alloc_bootmem calls are converted to
     memblock.

   - The thread_info structure is stripped down and moved to the
     task_struct. The only field left in thread_info is the flags field.

   - Rework of the arch topology code to fix a fake numa issue.

   - Refactoring of the atomic primitives and a new preempt_count
     implementation.

   - Clocksource steering for the STP sync check offsets.

   - The s390 specific headers are changed to make them usable with
     CLANG.

   - Bug fixes and cleanup"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (70 commits)
  s390/cpumf: Use configuration level indication for sampling data
  s390: provide memmove implementation
  s390: cleanup arch/s390/kernel Makefile
  s390: fix initrd corruptions with gcov/kcov instrumented kernels
  s390: exclude early C code from gcov profiling
  s390/dasd: channel path aware error recovery
  s390/dasd: extend dasd path handling
  s390: remove unused labels from entry.S
  s390/vmlogrdr: fix IUCV buffer allocation
  s390/crypto: unlock on error in prng_tdes_read()
  s390/sysinfo: show partition extended name and UUID if available
  s390/numa: pin all possible cpus to nodes early
  s390/numa: establish cpu to node mapping early
  s390/topology: use cpu_topology array instead of per cpu variable
  s390/smp: initialize cpu_present_mask in setup_arch
  s390/topology: always use s390 specific sched_domain_topology_level
  s390/smp: use smp_get_base_cpu() helper function
  s390/numa: always use logical cpu and core ids
  s390: Remove VLAIS in ptff() and clear_table()
  s390: fix machine check panic stack switch
  ...
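The bulk of the diff below implements the thread_info conversion called out above: per-task fields such as last_break, system_call and the accounting timers move out of struct thread_info into struct thread_struct, the lowcore now caches the task_struct pointer instead of a thread_info pointer, and only the flags field survives in a thread_info that is embedded at the start of task_struct. A minimal sketch of the resulting access pattern (abbreviated, hypothetical field lists; the real definitions live in the s390 headers):

    /* Sketch only -- illustrates the access-pattern change, not the real layout. */
    struct thread_info {
            unsigned long flags;            /* only remaining field */
    };

    struct thread_struct_sketch {           /* fields relocated from thread_info */
            unsigned long last_break;
            unsigned int system_call;
            unsigned long user_timer;
            unsigned long system_timer;
    };

    struct task_struct_sketch {
            struct thread_info thread_info; /* embedded, must stay first */
            /* ... */
            struct thread_struct_sketch thread;
    };

    /* old: task_thread_info(tsk)->last_break
     * new: tsk->thread.last_break
     * old: lowcore.thread_info   (asm: __LC_THREAD_INFO)
     * new: lowcore.current_task  (asm: __LC_CURRENT) */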
@@ -2,20 +2,47 @@
 #
 # Makefile for the linux kernel.
 #
 
-KCOV_INSTRUMENT_early.o := n
-KCOV_INSTRUMENT_sclp.o := n
-KCOV_INSTRUMENT_als.o := n
-
 ifdef CONFIG_FUNCTION_TRACER
-# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace tracer code
+
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace early setup code
+
+CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
+
 endif
+
+GCOV_PROFILE_als.o := n
+GCOV_PROFILE_early.o := n
+GCOV_PROFILE_sclp.o := n
+
+KCOV_INSTRUMENT_als.o := n
+KCOV_INSTRUMENT_early.o := n
+KCOV_INSTRUMENT_sclp.o := n
+
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+UBSAN_SANITIZE_sclp.o := n
+
+#
+# Use -march=z900 for sclp.c and als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
+CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp.o += -march=z900
+AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+AFLAGS_head.o += -march=z900
+endif
 
 #
 # Passing null pointers is ok for smp code, since we access the lowcore here.
 #
 CFLAGS_smp.o := -Wno-nonnull
 
 #
 # Disable tailcall optimizations for stack / callchain walking functions
@@ -30,27 +57,7 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
 #
 CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
+CFLAGS_sysinfo.o += -w
-#
-# Use -march=z900 for sclp.c and als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp.o += -march=z900
-CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += -march=z900
-AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
-endif
-GCOV_PROFILE_sclp.o := n
-GCOV_PROFILE_als.o := n
-UBSAN_SANITIZE_als.o := n
-UBSAN_SANITIZE_early.o := n
-UBSAN_SANITIZE_sclp.o := n
-CFLAGS_sysinfo.o += -w
 
 obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
@@ -25,12 +25,14 @@
 int main(void)
 {
 	/* task struct offsets */
-	OFFSET(__TASK_thread_info, task_struct, stack);
+	OFFSET(__TASK_stack, task_struct, stack);
 	OFFSET(__TASK_thread, task_struct, thread);
 	OFFSET(__TASK_pid, task_struct, pid);
 	BLANK();
 	/* thread struct offsets */
 	OFFSET(__THREAD_ksp, thread_struct, ksp);
+	OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table);
+	OFFSET(__THREAD_last_break, thread_struct, last_break);
 	OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
 	OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
 	OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
@@ -39,14 +41,7 @@ int main(void)
 	OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
 	BLANK();
 	/* thread info offsets */
-	OFFSET(__TI_task, thread_info, task);
-	OFFSET(__TI_flags, thread_info, flags);
-	OFFSET(__TI_sysc_table, thread_info, sys_call_table);
-	OFFSET(__TI_cpu, thread_info, cpu);
-	OFFSET(__TI_precount, thread_info, preempt_count);
-	OFFSET(__TI_user_timer, thread_info, user_timer);
-	OFFSET(__TI_system_timer, thread_info, system_timer);
-	OFFSET(__TI_last_break, thread_info, last_break);
+	OFFSET(__TI_flags, task_struct, thread_info.flags);
 	BLANK();
 	/* pt_regs offsets */
 	OFFSET(__PT_ARGS, pt_regs, args);
@@ -79,6 +74,8 @@ int main(void)
 	OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
 	OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
 	OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+	OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
+	OFFSET(__VDSO_TS_END, vdso_data, ts_end);
 	OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
 	OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
 	OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
@@ -159,7 +156,6 @@ int main(void)
 	OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
 	OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
 	OFFSET(__LC_CURRENT, lowcore, current_task);
-	OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
 	OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
 	OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
 	OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
@@ -173,6 +169,7 @@ int main(void)
 	OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
 	OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
 	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_PASTE, lowcore, paste);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
@@ -446,7 +446,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 		/* set extra registers only for synchronous signals */
 		regs->gprs[4] = regs->int_code & 127;
 		regs->gprs[5] = regs->int_parm_long;
-		regs->gprs[6] = task_thread_info(current)->last_break;
+		regs->gprs[6] = current->thread.last_break;
 	}
 
 	return 0;
@@ -523,7 +523,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
 	regs->gprs[2] = ksig->sig;
 	regs->gprs[3] = (__force __u64) &frame->info;
 	regs->gprs[4] = (__force __u64) &frame->uc;
-	regs->gprs[5] = task_thread_info(current)->last_break;
+	regs->gprs[5] = current->thread.last_break;
 	return 0;
 }
@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
 	psw.addr = (unsigned long) s390_base_pgm_handler;
 	S390_lowcore.program_new_psw = psw;
 	s390_base_pgm_handler_fn = early_pgm_check_handler;
+	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
 }
 
 static noinline __init void setup_facility_list(void)
@@ -391,7 +392,49 @@ static int __init cad_init(void)
 }
 early_initcall(cad_init);
 
-static __init void rescue_initrd(void)
+static __init void memmove_early(void *dst, const void *src, size_t n)
+{
+	unsigned long addr;
+	long incr;
+	psw_t old;
+
+	if (!n)
+		return;
+	incr = 1;
+	if (dst > src) {
+		incr = -incr;
+		dst += n - 1;
+		src += n - 1;
+	}
+	old = S390_lowcore.program_new_psw;
+	S390_lowcore.program_new_psw.mask = __extract_psw();
+	asm volatile(
+		"	larl	%[addr],1f\n"
+		"	stg	%[addr],%[psw_pgm_addr]\n"
+		"0:	mvc	0(1,%[dst]),0(%[src])\n"
+		"	agr	%[dst],%[incr]\n"
+		"	agr	%[src],%[incr]\n"
+		"	brctg	%[n],0b\n"
+		"1:\n"
+		: [addr] "=&d" (addr),
+		  [psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr),
+		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
+		: [incr] "d" (incr)
+		: "cc", "memory");
+	S390_lowcore.program_new_psw = old;
+}
+
+static __init noinline void ipl_save_parameters(void)
+{
+	void *src, *dst;
+
+	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
+	dst = (void *) IPL_PARMBLOCK_ORIGIN;
+	memmove_early(dst, src, PAGE_SIZE);
+	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
+}
+
+static __init noinline void rescue_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
 	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
@@ -405,7 +448,7 @@ static __init void rescue_initrd(void)
 		return;
 	if (INITRD_START >= min_initrd_addr)
 		return;
-	memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
+	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
 	INITRD_START = min_initrd_addr;
 #endif
 }
@@ -467,7 +510,8 @@ void __init startup_init(void)
 	ipl_save_parameters();
 	rescue_initrd();
 	clear_bss_section();
-	ptff_init();
+	ipl_verify_parameters();
+	time_early_init();
 	init_kernel_storage_key();
 	lockdep_off();
 	setup_lowcore_early();
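The memmove_early() added above copies the IPL parameter block and the initrd before the kernel is fully up, so it must survive a program check on either operand: the inline assembly points the program-check new PSW at the label behind the copy loop, which turns a fault into an early exit instead of a crash. A rough C rendering of the control flow (sketch only; the real code has to stay in a single asm block so the fault handler can resume at the right instruction):

    /* Conceptual C rendering of memmove_early() above -- sketch only. */
    static void memmove_early_sketch(char *dst, const char *src, unsigned long n)
    {
            long incr = 1;

            if (!n)
                    return;
            if (dst > src) {        /* possibly overlapping: copy backwards */
                    incr = -1;
                    dst += n - 1;
                    src += n - 1;
            }
            /* The real code points the program-check new PSW at "done" here,
             * so a fault on any single-byte move abandons the copy gracefully. */
            while (n--) {
                    *dst = *src;    /* mvc 0(1,%[dst]),0(%[src]) */
                    dst += incr;
                    src += incr;
            }
            /* "done": the previous program-check PSW is restored. */
    }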
@@ -42,7 +42,7 @@ __PT_R13 = __PT_GPRS + 104
 __PT_R14 = __PT_GPRS + 112
 __PT_R15 = __PT_GPRS + 120
 
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
 STACK_SIZE = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
@@ -123,8 +123,14 @@ _PIF_WORK = (_PIF_PER_TRAP)
 
 	.macro LAST_BREAK scratch
 	srag	\scratch,%r10,23
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
 	jz	.+10
-	stg	%r10,__TI_last_break(%r12)
+	stg	%r10,__TASK_thread+__THREAD_last_break(%r12)
+#else
+	jz	.+14
+	lghi	\scratch,__TASK_thread
+	stg	%r10,__THREAD_last_break(\scratch,%r12)
+#endif
 	.endm
 
 	.macro REENABLE_IRQS
@@ -186,14 +192,13 @@ ENTRY(__switch_to)
 	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 	lgr	%r1,%r2
 	aghi	%r1,__TASK_thread		# thread_struct of prev task
-	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
+	lg	%r5,__TASK_stack(%r3)		# start of kernel stack of next
 	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
 	lgr	%r1,%r3
 	aghi	%r1,__TASK_thread		# thread_struct of next task
 	lgr	%r15,%r5
 	aghi	%r15,STACK_INIT			# end of kernel stack of next
 	stg	%r3,__LC_CURRENT		# store task struct of next
-	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
 	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
@@ -274,7 +279,7 @@ ENTRY(system_call)
.Lsysc_stmg:
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
 	lg	%r15,__LC_KERNEL_STACK
@@ -288,7 +293,13 @@ ENTRY(system_call)
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
-	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
+	# load address of system call table
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+	lg	%r10,__TASK_thread+__THREAD_sysc_table(%r12)
+#else
+	lghi	%r13,__TASK_thread
+	lg	%r10,__THREAD_sysc_table(%r13,%r12)
+#endif
 	llgh	%r8,__PT_INT_CODE+2(%r11)
 	slag	%r8,%r8,2			# shift and test for svc 0
 	jnz	.Lsysc_nr_ok
@@ -389,7 +400,6 @@ ENTRY(system_call)
 	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
 	jno	.Lsysc_return
 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
-	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	lghi	%r8,0			# svc 0 returns -ENOSYS
 	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 	cghi	%r1,NR_syscalls
@@ -457,7 +467,7 @@ ENTRY(system_call)
 #
 ENTRY(ret_from_fork)
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	brasl	%r14,schedule_tail
 	TRACE_IRQS_ON
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
@@ -478,7 +488,7 @@ ENTRY(pgm_check_handler)
 	stpt	__LC_SYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 	tmhh	%r8,0x0001		# test problem state bit
@@ -501,7 +511,7 @@ ENTRY(pgm_check_handler)
2:	LAST_BREAK %r14
 	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 	lg	%r15,__LC_KERNEL_STACK
-	lg	%r14,__TI_task(%r12)
+	lgr	%r14,%r12
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
@@ -567,7 +577,7 @@ ENTRY(io_int_handler)
 	stpt	__LC_ASYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -626,7 +636,7 @@ ENTRY(io_int_handler)
 	jo	.Lio_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
-	icm	%r0,15,__TI_precount(%r12)
+	icm	%r0,15,__LC_PREEMPT_COUNT
 	jnz	.Lio_restore		# preemption is disabled
 	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 	jno	.Lio_restore
@@ -741,7 +751,7 @@ ENTRY(ext_int_handler)
 	stpt	__LC_ASYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -798,13 +808,10 @@ ENTRY(save_fpu_regs)
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	bor	%r14
 	stfpc	__THREAD_FPU_fpc(%r2)
-.Lsave_fpu_regs_fpc_end:
 	lg	%r3,__THREAD_FPU_regs(%r2)
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
-.Lsave_fpu_regs_vx_low:
 	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
-.Lsave_fpu_regs_vx_high:
 	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
 	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
@@ -851,9 +858,7 @@ load_fpu_regs:
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
-.Lload_fpu_regs_vx:
 	VLM	%v0,%v15,0,%r4
-.Lload_fpu_regs_vx_high:
 	VLM	%v16,%v31,256,%r4
 	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
@@ -889,7 +894,7 @@ ENTRY(mcck_int_handler)
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_MCK_OLD_PSW
 	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
@@ -948,7 +953,7 @@ ENTRY(mcck_int_handler)
 
.Lmcck_panic:
 	lg	%r15,__LC_PANIC_STACK
-	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	j	.Lmcck_skip
 
 #
@@ -1085,7 +1090,7 @@ cleanup_critical:
 	jhe	0f
 	# set up saved registers r10 and r12
 	stg	%r10,16(%r11)		# r10 last break
-	stg	%r12,32(%r11)		# r12 thread-info pointer
+	stg	%r12,32(%r11)		# r12 task struct pointer
0:	# check if the user time update has been done
 	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
 	jh	0f
@@ -1106,7 +1111,9 @@ cleanup_critical:
 	lg	%r9,16(%r11)
 	srag	%r9,%r9,23
 	jz	0f
-	mvc	__TI_last_break(8,%r12),16(%r11)
+	lgr	%r9,%r12
+	aghi	%r9,__TASK_thread
+	mvc	__THREAD_last_break(8,%r9),16(%r11)
0:	# set up saved register r11
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r9,STACK_FRAME_OVERHEAD(%r15)
@@ -315,7 +315,7 @@ ENTRY(startup_kdump)
 	jg	startup_continue
 
.Lstack:
-	.long	0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+	.long	0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
 	.align	8
6:	.long	0x7fffffff,0xffffffff
@@ -32,11 +32,10 @@ ENTRY(startup_continue)
 #
 # Setup stack
 #
-	larl	%r15,init_thread_union
-	stg	%r15,__LC_THREAD_INFO	# cache thread info in lowcore
-	lg	%r14,__TI_task(%r15)	# cache current in lowcore
+	larl	%r14,init_task
 	stg	%r14,__LC_CURRENT
-	aghi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+	larl	%r15,init_thread_union
+	aghi	%r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
 	stg	%r15,__LC_KERNEL_STACK	# set end of kernel stack
 	aghi	%r15,-160
 #
@@ -1991,10 +1991,9 @@ void __init ipl_update_parameters(void)
 		diag308_set_works = 1;
 }
 
-void __init ipl_save_parameters(void)
+void __init ipl_verify_parameters(void)
 {
 	struct cio_iplinfo iplinfo;
-	void *src, *dst;
 
 	if (cio_get_iplinfo(&iplinfo))
 		return;
@@ -2005,10 +2004,6 @@ void __init ipl_save_parameters(void)
 	if (!iplinfo.is_qdio)
 		return;
 	ipl_flags |= IPL_PARMBLOCK_VALID;
-	src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
-	dst = (void *)IPL_PARMBLOCK_ORIGIN;
-	memmove(dst, src, PAGE_SIZE);
-	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
 }
 
 static LIST_HEAD(rcall);
@@ -168,7 +168,7 @@ void do_softirq_own_stack(void)
 	old = current_stack_pointer();
 	/* Check against async. stack address range. */
 	new = S390_lowcore.async_stack;
-	if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+	if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
 		/* Need to switch to the async. stack. */
 		new -= STACK_FRAME_OVERHEAD;
 		((struct stack_frame *) new)->back_chain = old;
@@ -5,7 +5,8 @@
 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */
 
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
 #include <asm/facility.h>
@@ -183,4 +184,4 @@ static int __init lgr_init(void)
 	lgr_timer_set();
 	return 0;
 }
-module_init(lgr_init);
+device_initcall(lgr_init);
@@ -995,39 +995,36 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
 	regs.int_parm = CPU_MF_INT_SF_PRA;
 	sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
 
-	regs.psw.addr = sfr->basic.ia;
-	if (sfr->basic.T)
-		regs.psw.mask |= PSW_MASK_DAT;
-	if (sfr->basic.W)
-		regs.psw.mask |= PSW_MASK_WAIT;
-	if (sfr->basic.P)
-		regs.psw.mask |= PSW_MASK_PSTATE;
-	switch (sfr->basic.AS) {
-	case 0x0:
-		regs.psw.mask |= PSW_ASC_PRIMARY;
-		break;
-	case 0x1:
-		regs.psw.mask |= PSW_ASC_ACCREG;
-		break;
-	case 0x2:
-		regs.psw.mask |= PSW_ASC_SECONDARY;
-		break;
-	case 0x3:
-		regs.psw.mask |= PSW_ASC_HOME;
-		break;
-	}
+	psw_bits(regs.psw).ia = sfr->basic.ia;
+	psw_bits(regs.psw).t = sfr->basic.T;
+	psw_bits(regs.psw).w = sfr->basic.W;
+	psw_bits(regs.psw).p = sfr->basic.P;
+	psw_bits(regs.psw).as = sfr->basic.AS;
 
 	/*
-	 * A non-zero guest program parameter indicates a guest
-	 * sample.
-	 * Note that some early samples or samples from guests without
+	 * Use the hardware provided configuration level to decide if the
+	 * sample belongs to a guest or host. If that is not available,
+	 * fall back to the following heuristics:
+	 * A non-zero guest program parameter always indicates a guest
+	 * sample. Some early samples or samples from guests without
	 * lpp usage would be misaccounted to the host. We use the asn
-	 * value as a heuristic to detect most of these guest samples.
-	 * If the value differs from the host hpp value, we assume
-	 * it to be a KVM guest.
+	 * value as an addon heuristic to detect most of these guest samples.
+	 * If the value differs from the host hpp value, we assume to be a
+	 * KVM guest.
 	 */
-	if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
+	switch (sfr->basic.CL) {
+	case 1: /* logical partition */
+		sde_regs->in_guest = 0;
+		break;
+	case 2: /* virtual machine */
 		sde_regs->in_guest = 1;
+		break;
+	default: /* old machine, use heuristics */
+		if (sfr->basic.gpp ||
+		    sfr->basic.prim_asn != (u16)sfr->basic.hpp)
+			sde_regs->in_guest = 1;
+		break;
+	}
 
 	overflow = 0;
 	if (perf_exclude_event(event, &regs, sde_regs))
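The new classification above consults the sample's hardware-provided configuration level first and keeps the old gpp/asn comparison only as a fallback for machines that do not report it. Condensed into plain C (sketch; the parameter names are ad hoc, the real code reads the sf_raw_sample basic entry shown above):

    /* Decision order used above -- sketch only. */
    static int sample_in_guest(unsigned int cl, unsigned long gpp,
                               unsigned short prim_asn, unsigned long hpp)
    {
            switch (cl) {
            case 1:                 /* logical partition: host sample */
                    return 0;
            case 2:                 /* virtual machine: guest sample */
                    return 1;
            default:                /* old machine: fall back to heuristics */
                    return gpp || prim_asn != (unsigned short) hpp;
            }
    }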
@@ -103,7 +103,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
		unsigned long arg, struct task_struct *p)
 {
-	struct thread_info *ti;
 	struct fake_frame
 	{
 		struct stack_frame sf;
@@ -121,9 +120,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
 	/* Initialize per thread user and system timer values */
-	ti = task_thread_info(p);
-	ti->user_timer = 0;
-	ti->system_timer = 0;
+	p->thread.user_timer = 0;
+	p->thread.system_timer = 0;
 
 	frame->sf.back_chain = 0;
 	/* new return point is ret_from_fork */
@@ -461,7 +461,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		}
 		return 0;
 	case PTRACE_GET_LAST_BREAK:
-		put_user(task_thread_info(child)->last_break,
+		put_user(child->thread.last_break,
			 (unsigned long __user *) data);
 		return 0;
 	case PTRACE_ENABLE_TE:
@@ -811,7 +811,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		}
 		return 0;
 	case PTRACE_GET_LAST_BREAK:
-		put_user(task_thread_info(child)->last_break,
+		put_user(child->thread.last_break,
			 (unsigned int __user *) data);
 		return 0;
 	}
@@ -997,10 +997,10 @@ static int s390_last_break_get(struct task_struct *target,
 	if (count > 0) {
 		if (kbuf) {
 			unsigned long *k = kbuf;
-			*k = task_thread_info(target)->last_break;
+			*k = target->thread.last_break;
 		} else {
 			unsigned long __user *u = ubuf;
-			if (__put_user(task_thread_info(target)->last_break, u))
+			if (__put_user(target->thread.last_break, u))
				return -EFAULT;
 		}
 	}
@@ -1113,7 +1113,7 @@ static int s390_system_call_get(struct task_struct *target,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
 {
-	unsigned int *data = &task_thread_info(target)->system_call;
+	unsigned int *data = &target->thread.system_call;
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(unsigned int));
 }
@@ -1123,7 +1123,7 @@ static int s390_system_call_set(struct task_struct *target,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
 {
-	unsigned int *data = &task_thread_info(target)->system_call;
+	unsigned int *data = &target->thread.system_call;
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
 }
@@ -1327,7 +1327,7 @@ static int s390_compat_last_break_get(struct task_struct *target,
 	compat_ulong_t last_break;
 
 	if (count > 0) {
-		last_break = task_thread_info(target)->last_break;
+		last_break = target->thread.last_break;
 		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
@@ -35,6 +35,7 @@
 #include <linux/root_dev.h>
 #include <linux/console.h>
 #include <linux/kernel_stat.h>
+#include <linux/dma-contiguous.h>
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/pfn.h>
@@ -303,7 +304,7 @@ static void __init setup_lowcore(void)
	 * Setup lowcore for boot cpu
	 */
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
-	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
+	lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = PSW_KERNEL_BITS |
@@ -324,15 +325,15 @@ static void __init setup_lowcore(void)
 	lc->kernel_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->async_stack = (unsigned long)
-		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+		memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
		+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->panic_stack = (unsigned long)
-		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+		memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
-	lc->thread_info = (unsigned long) &init_thread_union;
+	lc->current_task = (unsigned long)&init_task;
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
+	lc->preempt_count = S390_lowcore.preempt_count;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
@@ -349,7 +350,7 @@ static void __init setup_lowcore(void)
 	lc->last_update_timer = S390_lowcore.last_update_timer;
 	lc->last_update_clock = S390_lowcore.last_update_clock;
 
-	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+	restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
 	restart_stack += ASYNC_SIZE;
 
	/*
@@ -412,7 +413,7 @@ static void __init setup_resources(void)
 	bss_resource.end = (unsigned long) &__bss_stop - 1;
 
 	for_each_memblock(memory, reg) {
-		res = alloc_bootmem_low(sizeof(*res));
+		res = memblock_virt_alloc(sizeof(*res), 8);
 		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 
 		res->name = "System RAM";
@@ -426,7 +427,7 @@ static void __init setup_resources(void)
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
-				sub_res = alloc_bootmem_low(sizeof(*sub_res));
+				sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
@@ -445,7 +446,7 @@ static void __init setup_resources(void)
	 * part of the System RAM resource.
	 */
 	if (crashk_res.end) {
-		memblock_add(crashk_res.start, resource_size(&crashk_res));
+		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
 		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
 		insert_resource(&iomem_resource, &crashk_res);
 	}
@@ -903,6 +904,7 @@ void __init setup_arch(char **cmdline_p)
 
 	setup_memory_end();
 	setup_memory();
+	dma_contiguous_reserve(memory_end);
 
 	check_initrd();
 	reserve_crashkernel();
@@ -921,6 +923,8 @@ void __init setup_arch(char **cmdline_p)
 	cpu_detect_mhz_feature();
 	cpu_init();
 	numa_setup();
+	smp_detect_cpus();
+	topology_init_early();
 
	/*
	 * Create kernel page tables and switch to virtual addressing.
@@ -359,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
		/* set extra registers only for synchronous signals */
		regs->gprs[4] = regs->int_code & 127;
		regs->gprs[5] = regs->int_parm_long;
-		regs->gprs[6] = task_thread_info(current)->last_break;
+		regs->gprs[6] = current->thread.last_break;
 	}
 	return 0;
 }
@@ -430,7 +430,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	regs->gprs[2] = ksig->sig;
 	regs->gprs[3] = (unsigned long) &frame->info;
 	regs->gprs[4] = (unsigned long) &frame->uc;
-	regs->gprs[5] = task_thread_info(current)->last_break;
+	regs->gprs[5] = current->thread.last_break;
 	return 0;
 }
 
@@ -467,13 +467,13 @@ void do_signal(struct pt_regs *regs)
	 * the debugger may change all our registers, including the system
	 * call information.
	 */
-	current_thread_info()->system_call =
+	current->thread.system_call =
		test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
 
 	if (get_signal(&ksig)) {
		/* Whee! Actually deliver the signal. */
-		if (current_thread_info()->system_call) {
-			regs->int_code = current_thread_info()->system_call;
+		if (current->thread.system_call) {
+			regs->int_code = current->thread.system_call;
			/* Check for system call restarting. */
			switch (regs->gprs[2]) {
			case -ERESTART_RESTARTBLOCK:
@@ -506,8 +506,8 @@ void do_signal(struct pt_regs *regs)
 
	/* No handlers present - check for system call restart */
 	clear_pt_regs_flag(regs, PIF_SYSCALL);
-	if (current_thread_info()->system_call) {
-		regs->int_code = current_thread_info()->system_call;
+	if (current->thread.system_call) {
+		regs->int_code = current->thread.system_call;
		switch (regs->gprs[2]) {
		case -ERESTART_RESTARTBLOCK:
			/* Restart with sys_restart_syscall */
@@ -19,6 +19,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
+#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -259,16 +260,14 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 {
 	struct lowcore *lc = pcpu->lowcore;
-	struct thread_info *ti = task_thread_info(tsk);
 
 	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->thread_info = (unsigned long) task_thread_info(tsk);
 	lc->current_task = (unsigned long) tsk;
 	lc->lpp = LPP_MAGIC;
 	lc->current_pid = tsk->pid;
-	lc->user_timer = ti->user_timer;
-	lc->system_timer = ti->system_timer;
+	lc->user_timer = tsk->thread.user_timer;
+	lc->system_timer = tsk->thread.system_timer;
 	lc->steal_timer = 0;
 }
 
@@ -662,14 +661,12 @@ int smp_cpu_get_polarization(int cpu)
 	return pcpu_devices[cpu].polarization;
 }
 
-static struct sclp_core_info *smp_get_core_info(void)
+static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
 {
 	static int use_sigp_detection;
-	struct sclp_core_info *info;
 	int address;
 
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (info && (use_sigp_detection || sclp_get_core_info(info))) {
+	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
@@ -683,7 +680,6 @@ static struct sclp_core_info *smp_get_core_info(void)
		}
		info->combined = info->configured;
 	}
-	return info;
 }
 
 static int smp_add_present_cpu(int cpu);
@@ -724,17 +720,15 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
 	return nr;
 }
 
-static void __init smp_detect_cpus(void)
+void __init smp_detect_cpus(void)
 {
 	unsigned int cpu, mtid, c_cpus, s_cpus;
 	struct sclp_core_info *info;
 	u16 address;
 
	/* Get CPU information */
-	info = smp_get_core_info();
-	if (!info)
-		panic("smp_detect_cpus failed to allocate memory\n");
-
+	info = memblock_virt_alloc(sizeof(*info), 8);
+	smp_get_core_info(info, 1);
	/* Find boot CPU type */
 	if (sclp.has_core_type) {
		address = stap();
@@ -770,7 +764,7 @@ static void __init smp_detect_cpus(void)
 	get_online_cpus();
 	__smp_rescan_cpus(info, 0);
 	put_online_cpus();
-	kfree(info);
+	memblock_free_early((unsigned long)info, sizeof(*info));
 }
 
 /*
@@ -807,7 +801,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	pcpu = pcpu_devices + cpu;
 	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
-	base = cpu - (cpu % (smp_cpu_mtid + 1));
+	base = smp_get_base_cpu(cpu);
 	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
@@ -907,7 +901,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	/* request the 0x1202 external call external interrupt */
 	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
-	smp_detect_cpus();
 }
 
 void __init smp_prepare_boot_cpu(void)
@@ -973,7 +966,7 @@ static ssize_t cpu_configure_store(struct device *dev,
 	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
 	cpu = dev->id;
-	cpu -= cpu % (smp_cpu_mtid + 1);
+	cpu = smp_get_base_cpu(cpu);
 	if (cpu == 0)
		goto out;
 	for (i = 0; i <= smp_cpu_mtid; i++)
@@ -1106,9 +1099,10 @@ int __ref smp_rescan_cpus(void)
 	struct sclp_core_info *info;
 	int nr;
 
-	info = smp_get_core_info();
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
		return -ENOMEM;
+	smp_get_core_info(info, 0);
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
 	nr = __smp_rescan_cpus(info, 1);
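Two call sites above replace the open-coded base computation with smp_get_base_cpu(). With SMT, CPU numbers come in blocks of (smp_cpu_mtid + 1) hardware threads per core, and the helper presumably reduces to exactly the expression visible in the deleted lines, mapping any CPU number to the first thread of its core:

    /* Presumed shape of the helper -- sketch; the formula is the one the
     * deleted lines above open-coded. */
    static inline int smp_get_base_cpu(int cpu)
    {
            return cpu - (cpu % (smp_cpu_mtid + 1));
    }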
@@ -194,7 +194,7 @@ pgm_check_entry:
 
	/* Suspend CPU not available -> panic */
 	larl	%r15,init_thread_union
-	ahi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER)
+	ahi	%r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
 	larl	%r2,.Lpanic_string
 	larl	%r3,_sclp_print_early
 	lghi	%r1,0
@@ -56,6 +56,20 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
 }
 EXPORT_SYMBOL(stsi);
 
+static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
+{
+	switch (encoding) {
+	case 1: /* EBCDIC */
+		EBCASC(name, len);
+		break;
+	case 2: /* UTF-8 */
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
 static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
 {
 	int i;
@@ -207,24 +221,19 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
		seq_printf(m, "LPAR CPUs S-MTID:     %d\n", info->mt_stid);
		seq_printf(m, "LPAR CPUs PS-MTID:    %d\n", info->mt_psmtid);
 	}
+	if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
+		seq_printf(m, "LPAR Extended Name:   %-.256s\n", info->ext_name);
+		seq_printf(m, "LPAR UUID:            %pUb\n", &info->uuid);
+	}
 }
 
 static void print_ext_name(struct seq_file *m, int lvl,
			   struct sysinfo_3_2_2 *info)
 {
-	if (info->vm[lvl].ext_name_encoding == 0)
+	size_t len = sizeof(info->ext_names[lvl]);
+
+	if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
		return;
 	if (info->ext_names[lvl][0] == 0)
		return;
-	switch (info->vm[lvl].ext_name_encoding) {
-	case 1: /* EBCDIC */
-		EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
-		break;
-	case 2: /* UTF-8 */
-		break;
-	default:
-		return;
-	}
 	seq_printf(m, "VM%02d Extended Name:   %-.256s\n", lvl,
		   info->ext_names[lvl]);
 }
@@ -59,19 +59,27 @@ ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
 EXPORT_SYMBOL(s390_epoch_delta_notifier);
 
 unsigned char ptff_function_mask[16];
-unsigned long lpar_offset;
-unsigned long initial_leap_seconds;
+
+static unsigned long long lpar_offset;
+static unsigned long long initial_leap_seconds;
+static unsigned long long tod_steering_end;
+static long long tod_steering_delta;
 
 /*
 * Get time offsets with PTFF
 */
-void __init ptff_init(void)
+void __init time_early_init(void)
 {
 	struct ptff_qto qto;
 	struct ptff_qui qui;
 
+	/* Initialize TOD steering parameters */
+	tod_steering_end = sched_clock_base_cc;
+	vdso_data->ts_end = tod_steering_end;
+
 	if (!test_facility(28))
		return;
+
 	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
 
	/* get LPAR offset */
@@ -80,7 +88,7 @@ void __init ptff_init(void)
 
	/* get initial leap seconds */
 	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
-		initial_leap_seconds = (unsigned long)
+		initial_leap_seconds = (unsigned long long)
			((long) qui.old_leap * 4096000000L);
 }
 
@@ -123,18 +131,6 @@ void clock_comparator_work(void)
 	cd->event_handler(cd);
 }
 
-/*
- * Fixup the clock comparator.
- */
-static void fixup_clock_comparator(unsigned long long delta)
-{
-	/* If nobody is waiting there's nothing to fix. */
-	if (S390_lowcore.clock_comparator == -1ULL)
-		return;
-	S390_lowcore.clock_comparator += delta;
-	set_clock_comparator(S390_lowcore.clock_comparator);
-}
-
 static int s390_next_event(unsigned long delta,
			   struct clock_event_device *evt)
 {
@@ -215,7 +211,21 @@ void read_boot_clock64(struct timespec64 *ts)
 
 static cycle_t read_tod_clock(struct clocksource *cs)
 {
-	return get_tod_clock();
+	unsigned long long now, adj;
+
+	preempt_disable(); /* protect from changes to steering parameters */
+	now = get_tod_clock();
+	adj = tod_steering_end - now;
+	if (unlikely((s64) adj >= 0))
+		/*
+		 * manually steer by 1 cycle every 2^16 cycles. This
+		 * corresponds to shifting the tod delta by 15. 1s is
+		 * therefore steered in ~9h. The adjust will decrease
+		 * over time, until it finally reaches 0.
+		 */
+		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
+	preempt_enable();
+	return now;
 }
 
 static struct clocksource clocksource_tod = {
@@ -384,6 +394,55 @@ static inline int check_sync_clock(void)
 	return rc;
 }
 
+/*
+ * Apply clock delta to the global data structures.
+ * This is called once on the CPU that performed the clock sync.
+ */
+static void clock_sync_global(unsigned long long delta)
+{
+	unsigned long now, adj;
+	struct ptff_qto qto;
+
+	/* Fixup the monotonic sched clock. */
+	sched_clock_base_cc += delta;
+	/* Adjust TOD steering parameters. */
+	vdso_data->tb_update_count++;
+	now = get_tod_clock();
+	adj = tod_steering_end - now;
+	if (unlikely((s64) adj >= 0))
+		/* Calculate how much of the old adjustment is left. */
+		tod_steering_delta = (tod_steering_delta < 0) ?
+			-(adj >> 15) : (adj >> 15);
+	tod_steering_delta += delta;
+	if ((abs(tod_steering_delta) >> 48) != 0)
+		panic("TOD clock sync offset %lli is too large to drift\n",
+		      tod_steering_delta);
+	tod_steering_end = now + (abs(tod_steering_delta) << 15);
+	vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
+	vdso_data->ts_end = tod_steering_end;
+	vdso_data->tb_update_count++;
+	/* Update LPAR offset. */
+	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+		lpar_offset = qto.tod_epoch_difference;
+	/* Call the TOD clock change notifier. */
+	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
+}
+
+/*
+ * Apply clock delta to the per-CPU data structures of this CPU.
+ * This is called for each online CPU after the call to clock_sync_global.
+ */
+static void clock_sync_local(unsigned long long delta)
+{
+	/* Add the delta to the clock comparator. */
+	if (S390_lowcore.clock_comparator != -1ULL) {
+		S390_lowcore.clock_comparator += delta;
+		set_clock_comparator(S390_lowcore.clock_comparator);
+	}
+	/* Adjust the last_update_clock time-stamp. */
+	S390_lowcore.last_update_clock += delta;
+}
+
 /* Single threaded workqueue used for stp sync events */
 static struct workqueue_struct *time_sync_wq;
 
@@ -397,31 +456,9 @@ static void __init time_init_wq(void)
 struct clock_sync_data {
 	atomic_t cpus;
 	int in_sync;
-	unsigned long long fixup_cc;
+	unsigned long long clock_delta;
 };
 
-static void clock_sync_cpu(struct clock_sync_data *sync)
-{
-	atomic_dec(&sync->cpus);
-	enable_sync_clock();
-	while (sync->in_sync == 0) {
-		__udelay(1);
-		/*
-		 * A different cpu changes *in_sync. Therefore use
-		 * barrier() to force memory access.
-		 */
-		barrier();
-	}
-	if (sync->in_sync != 1)
-		/* Didn't work. Clear per-cpu in sync bit again. */
-		disable_sync_clock(NULL);
-	/*
-	 * This round of TOD syncing is done. Set the clock comparator
-	 * to the next tick and let the processor continue.
-	 */
-	fixup_clock_comparator(sync->fixup_cc);
-}
-
 /*
 * Server Time Protocol (STP) code.
 */
@@ -523,54 +560,46 @@ void stp_queue_work(void)
 
 static int stp_sync_clock(void *data)
 {
-	static int first;
+	struct clock_sync_data *sync = data;
 	unsigned long long clock_delta;
-	struct clock_sync_data *stp_sync;
-	struct ptff_qto qto;
+	static int first;
 	int rc;
 
-	stp_sync = data;
-
-	if (xchg(&first, 1) == 1) {
-		/* Slave */
-		clock_sync_cpu(stp_sync);
-		return 0;
-	}
-
-	/* Wait until all other cpus entered the sync function. */
-	while (atomic_read(&stp_sync->cpus) != 0)
-		cpu_relax();
-
 	enable_sync_clock();
-
-	rc = 0;
-	if (stp_info.todoff[0] || stp_info.todoff[1] ||
-	    stp_info.todoff[2] || stp_info.todoff[3] ||
-	    stp_info.tmd != 2) {
-		rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
-		if (rc == 0) {
-			/* fixup the monotonic sched clock */
-			sched_clock_base_cc += clock_delta;
-			if (ptff_query(PTFF_QTO) &&
-			    ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
-				/* Update LPAR offset */
-				lpar_offset = qto.tod_epoch_difference;
-			atomic_notifier_call_chain(&s390_epoch_delta_notifier,
-						   0, &clock_delta);
-			stp_sync->fixup_cc = clock_delta;
-			fixup_clock_comparator(clock_delta);
-			rc = chsc_sstpi(stp_page, &stp_info,
-					sizeof(struct stp_sstpi));
-			if (rc == 0 && stp_info.tmd != 2)
-				rc = -EAGAIN;
+	if (xchg(&first, 1) == 0) {
+		/* Wait until all other cpus entered the sync function. */
+		while (atomic_read(&sync->cpus) != 0)
+			cpu_relax();
+		rc = 0;
+		if (stp_info.todoff[0] || stp_info.todoff[1] ||
+		    stp_info.todoff[2] || stp_info.todoff[3] ||
+		    stp_info.tmd != 2) {
+			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
+					&clock_delta);
+			if (rc == 0) {
+				sync->clock_delta = clock_delta;
+				clock_sync_global(clock_delta);
+				rc = chsc_sstpi(stp_page, &stp_info,
+						sizeof(struct stp_sstpi));
+				if (rc == 0 && stp_info.tmd != 2)
+					rc = -EAGAIN;
+			}
		}
+		sync->in_sync = rc ? -EAGAIN : 1;
+		xchg(&first, 0);
+	} else {
+		/* Slave */
+		atomic_dec(&sync->cpus);
+		/* Wait for in_sync to be set. */
+		while (READ_ONCE(sync->in_sync) == 0)
+			__udelay(1);
 	}
-	if (rc) {
+	if (sync->in_sync != 1)
		/* Didn't work. Clear per-cpu in sync bit again. */
		disable_sync_clock(NULL);
-		stp_sync->in_sync = -EAGAIN;
-	} else
-		stp_sync->in_sync = 1;
-	xchg(&first, 0);
+	/* Apply clock delta to per-CPU fields of this CPU. */
+	clock_sync_local(sync->clock_delta);
 
 	return 0;
 }
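The steering logic above slews the TOD clocksource instead of stepping it: read_tod_clock() applies a correction of (tod_steering_end - now) >> 15 that decays linearly to zero, and clock_sync_global() sets tod_steering_end = now + (|tod_steering_delta| << 15). With 4096 TOD units per microsecond, a 1 second offset (about 4.096e9 units) therefore drains over roughly 2^15 seconds, about 9.1 hours, which matches the "~9h" in the code comment. A sketch of the arithmetic:

    /* Sketch of the steering arithmetic used by read_tod_clock() and
     * clock_sync_global() above. TOD units: 4096 per microsecond. */
    static unsigned long long steered_tod(unsigned long long now,
                                          unsigned long long steering_end,
                                          long long steering_delta)
    {
            unsigned long long adj = steering_end - now;

            if ((long long) adj < 0)
                    return now;     /* steering interval is over */
            /* remaining correction decays linearly with adj */
            return (steering_delta < 0) ? now + (adj >> 15)
                                        : now - (adj >> 15);
    }

    /* clock_sync_global() picks the end time as now + (|delta| << 15), so a
     * 1 s offset (4.096e9 units) drains over 4.096e9 << 15 TOD units,
     * i.e. about 32768 s ~= 9.1 hours. */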
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
+#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
@@ -41,15 +42,17 @@ static bool topology_enabled = true;
 static DECLARE_WORK(topology_work, topology_work_fn);
 
 /*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
 static struct mask_info socket_info;
 static struct mask_info book_info;
 static struct mask_info drawer_info;
 
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+cpumask_t cpus_with_topology;
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -97,7 +100,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
-			topo = &per_cpu(cpu_topology, lcpu + i);
+			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
+			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
@@ -220,7 +224,7 @@ static void update_cpu_masks(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		topo = &per_cpu(cpu_topology, cpu);
+		topo = &cpu_topology[cpu];
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
			topo->socket_id = cpu;
			topo->book_id = cpu;
			topo->drawer_id = cpu;
+			if (cpu_present(cpu))
+				cpumask_set_cpu(cpu, &cpus_with_topology);
		}
 	}
 	numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
 	stsi(info, 15, 1, min(topology_max_mnest, 4));
 }
 
-int arch_update_cpu_topology(void)
+static int __arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
-	struct device *dev;
-	int cpu, rc = 0;
+	int rc = 0;
 
+	cpumask_clear(&cpus_with_topology);
 	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
 	update_cpu_masks();
 	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
+	return rc;
+}
+
+int arch_update_cpu_topology(void)
+{
+	struct device *dev;
+	int cpu, rc;
+
+	rc = __arch_update_cpu_topology();
 	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -394,23 +409,23 @@ int topology_cpu_init(struct cpu *cpu)
 
 static const struct cpumask *cpu_thread_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).thread_mask;
+	return &cpu_topology[cpu].thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).core_mask;
+	return &cpu_topology[cpu].core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).book_mask;
+	return &cpu_topology[cpu].book_mask;
 }
 
 static const struct cpumask *cpu_drawer_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).drawer_mask;
+	return &cpu_topology[cpu].drawer_mask;
 }
 
 static int __init early_parse_topology(char *p)
@@ -438,19 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
-		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
		mask = mask->next;
 	}
 }
 
-static int __init s390_topology_init(void)
+void __init topology_init_early(void)
 {
 	struct sysinfo_15_1_x *info;
 	int i;
 
+	set_sched_topology(s390_topology);
 	if (!MACHINE_HAS_TOPOLOGY)
-		return 0;
-	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+		goto out;
+	tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
 	info = tl_info;
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
@@ -460,10 +476,9 @@ static int __init s390_topology_init(void)
 	alloc_masks(info, &socket_info, 1);
 	alloc_masks(info, &book_info, 2);
 	alloc_masks(info, &drawer_info, 3);
-	set_sched_topology(s390_topology);
-	return 0;
+out:
+	__arch_update_cpu_topology();
 }
-early_initcall(s390_topology_init);
 
 static int __init topology_init(void)
 {
@@ -99,8 +99,27 @@ __kernel_clock_gettime:
	tml	%r4,0x0001		/* pending update ? loop */
	jnz	11b
	stcke	0(%r15)			/* Store TOD clock */
-	lm	%r0,%r1,1(%r15)
-	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+	lm	%r0,%r1,__VDSO_TS_END(%r5)	/* TOD steering end time */
+	s	%r0,1(%r15)			/* no - ts_steering_end */
+	sl	%r1,5(%r15)
+	brc	3,22f
+	ahi	%r0,-1
+22:	ltr	%r0,%r0			/* past end of steering? */
+	jm	24f
+	srdl	%r0,15			/* 1 per 2^16 */
+	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
+	jz	23f
+	lcr	%r0,%r0			/* negative TOD offset */
+	lcr	%r1,%r1
+	je	23f
+	ahi	%r0,-1
+23:	a	%r0,1(%r15)		/* add TOD timestamp */
+	al	%r1,5(%r15)
+	brc	12,25f
+	ahi	%r0,1
+	j	25f
+24:	lm	%r0,%r1,1(%r15)		/* load TOD timestamp */
+25:	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
	brc	3,12f
	ahi	%r0,-1
@@ -31,8 +31,27 @@ __kernel_gettimeofday:
	tml	%r4,0x0001		/* pending update ? loop */
	jnz	1b
	stcke	0(%r15)			/* Store TOD clock */
-	lm	%r0,%r1,1(%r15)
-	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+	lm	%r0,%r1,__VDSO_TS_END(%r5)	/* TOD steering end time */
+	s	%r0,1(%r15)
+	sl	%r1,5(%r15)
+	brc	3,14f
+	ahi	%r0,-1
+14:	ltr	%r0,%r0			/* past end of steering? */
+	jm	16f
+	srdl	%r0,15			/* 1 per 2^16 */
+	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
+	jz	15f
+	lcr	%r0,%r0			/* negative TOD offset */
+	lcr	%r1,%r1
+	je	15f
+	ahi	%r0,-1
+15:	a	%r0,1(%r15)		/* add TOD timestamp */
+	al	%r1,5(%r15)
+	brc	12,17f
+	ahi	%r0,1
+	j	17f
+16:	lm	%r0,%r1,1(%r15)		/* load TOD timestamp */
+17:	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
	brc	3,3f
	ahi	%r0,-1
@@ -83,8 +83,17 @@ __kernel_clock_gettime:
	tmll	%r4,0x0001		/* pending update ? loop */
	jnz	5b
	stcke	0(%r15)			/* Store TOD clock */
-	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
	lg	%r1,1(%r15)
+	lg	%r0,__VDSO_TS_END(%r5)	/* TOD steering end time */
+	slgr	%r0,%r1			/* now - ts_steering_end */
+	ltgr	%r0,%r0			/* past end of steering ? */
+	jm	17f
+	srlg	%r0,%r0,15		/* 1 per 2^16 */
+	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
+	jz	18f
+	lcgr	%r0,%r0			/* negative TOD offset */
+18:	algr	%r1,%r0			/* add steering offset */
+17:	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	msgf	%r1,__VDSO_TK_MULT(%r5)	/*  * tk->mult */
	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
@@ -31,7 +31,16 @@ __kernel_gettimeofday:
	jnz	0b
	stcke	0(%r15)			/* Store TOD clock */
	lg	%r1,1(%r15)
-	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+	lg	%r0,__VDSO_TS_END(%r5)	/* TOD steering end time */
+	slgr	%r0,%r1			/* now - ts_steering_end */
+	ltgr	%r0,%r0			/* past end of steering ? */
+	jm	6f
+	srlg	%r0,%r0,15		/* 1 per 2^16 */
+	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
+	jz	7f
+	lcgr	%r0,%r0			/* negative TOD offset */
+7:	algr	%r1,%r0			/* add steering offset */
+6:	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	msgf	%r1,__VDSO_TK_MULT(%r5)	/*  * tk->mult */
	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
@@ -96,7 +96,6 @@ static void update_mt_scaling(void)
 */
 static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 {
-	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, clock, user, system, steal;
 	u64 user_scaled, system_scaled;
 
@@ -119,13 +118,13 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();
 
-	user = S390_lowcore.user_timer - ti->user_timer;
+	user = S390_lowcore.user_timer - tsk->thread.user_timer;
 	S390_lowcore.steal_timer -= user;
-	ti->user_timer = S390_lowcore.user_timer;
+	tsk->thread.user_timer = S390_lowcore.user_timer;
 
-	system = S390_lowcore.system_timer - ti->system_timer;
+	system = S390_lowcore.system_timer - tsk->thread.system_timer;
 	S390_lowcore.steal_timer -= system;
-	ti->system_timer = S390_lowcore.system_timer;
+	tsk->thread.system_timer = S390_lowcore.system_timer;
 
 	user_scaled = user;
 	system_scaled = system;
@@ -153,15 +152,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 
 void vtime_task_switch(struct task_struct *prev)
 {
-	struct thread_info *ti;
-
 	do_account_vtime(prev, 0);
-	ti = task_thread_info(prev);
-	ti->user_timer = S390_lowcore.user_timer;
-	ti->system_timer = S390_lowcore.system_timer;
-	ti = task_thread_info(current);
-	S390_lowcore.user_timer = ti->user_timer;
-	S390_lowcore.system_timer = ti->system_timer;
+	prev->thread.user_timer = S390_lowcore.user_timer;
+	prev->thread.system_timer = S390_lowcore.system_timer;
+	S390_lowcore.user_timer = current->thread.user_timer;
+	S390_lowcore.system_timer = current->thread.system_timer;
 }
 
 /*
@@ -181,7 +176,6 @@ void vtime_account_user(struct task_struct *tsk)
 */
 void vtime_account_irq_enter(struct task_struct *tsk)
 {
-	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, system, system_scaled;
 
 	timer = S390_lowcore.last_update_timer;
@@ -193,9 +187,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
		update_mt_scaling();
 
-	system = S390_lowcore.system_timer - ti->system_timer;
+	system = S390_lowcore.system_timer - tsk->thread.system_timer;
 	S390_lowcore.steal_timer -= system;
-	ti->system_timer = S390_lowcore.system_timer;
+	tsk->thread.system_timer = S390_lowcore.system_timer;
 	system_scaled = system;
	/* Do MT utilization scaling */
 	if (smp_cpu_mtid) {