Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The most notable change for this pull request is the ftrace rework
  from Heiko. It brings a small performance improvement and the ground
  work to support a new gcc option to replace the mcount blocks with a
  single nop.

  Two new s390 specific system calls are added to emulate user space
  mmio for PCI, an artifact of how PCI memory is accessed.

  Two patches for the memory management with changes to common code.
  For KVM mm_forbids_zeropage is added, which disables the empty zero
  page for an mm that is used by a KVM process. And an optimization:
  pmdp_get_and_clear_full is added analogous to ptep_get_and_clear_full.

  Some micro-optimizations for the cmpxchg and the spinlock code.

  And as usual, bug fixes and cleanups."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (46 commits)
  s390/cputime: fix 31-bit compile
  s390/scm_block: make the number of reqs per HW req configurable
  s390/scm_block: handle multiple requests in one HW request
  s390/scm_block: allocate aidaw pages only when necessary
  s390/scm_block: use mempool to manage aidaw requests
  s390/eadm: change timeout value
  s390/mm: fix memory leak of ptlock in pmd_free_tlb
  s390: use local symbol names in entry[64].S
  s390/ptrace: always include vector registers in core files
  s390/simd: clear vector register pointer on fork/clone
  s390: translate cputime magic constants to macros
  s390/idle: convert open coded idle time seqcount
  s390/idle: add missing irq off lockdep annotation
  s390/debug: avoid function call for debug_sprintf_*
  s390/kprobes: fix instruction copy for out of line execution
  s390: remove diag 44 calls from cpu_relax()
  s390/dasd: retry partition detection
  s390/dasd: fix list corruption for sleep_on requests
  s390/dasd: fix infinite term I/O loop
  s390/dasd: remove unused code
  ...
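The two system calls named above, s390_pci_mmio_write and s390_pci_mmio_read, exist because PCI memory on s390 cannot be touched with ordinary loads and stores, so a mapped PCI BAR cannot simply be dereferenced from user space; the kernel performs the access on the caller's behalf. A minimal user-space sketch, assuming the __NR_s390_pci_mmio_* numbers exported by the kernel's <asm/unistd.h> and an mmio_addr that lies inside a valid user mapping of a PCI BAR:

	/* Hedged sketch: thin wrappers around the new syscalls. The
	 * argument lists follow the sys_s390_pci_mmio_* declarations
	 * added to entry.h in this merge; error handling is left to
	 * the caller. */
	#include <stddef.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	static long pci_mmio_write(unsigned long mmio_addr,
				   const void *buf, size_t length)
	{
		return syscall(__NR_s390_pci_mmio_write, mmio_addr, buf, length);
	}

	static long pci_mmio_read(unsigned long mmio_addr,
				  void *buf, size_t length)
	{
		return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, length);
	}

Both return 0 on success and -1 with errno set on failure, like any other syscall(2) invocation.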
@@ -17,8 +17,8 @@
  * Make sure that the compiler is new enough. We want a compiler that
  * is known to work with the "Q" assembler constraint.
  */
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too old; please use version 3.3.3 or newer
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
+#error Your compiler is too old; please use version 4.3 or newer
 #endif

 int main(void)
@@ -156,7 +156,6 @@ int main(void)
 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
-DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
 DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
 BLANK();
 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));

@@ -434,7 +434,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
 } else {
 /* Signal frames without vectors registers are short ! */
-__u16 __user *svc = (void *) frame + frame_size - 2;
+__u16 __user *svc = (void __user *) frame + frame_size - 2;
 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
 return -EFAULT;
 restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;

@@ -218,3 +218,5 @@ COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char
 COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
 COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
 COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);

@@ -1019,7 +1019,7 @@ debug_count_numargs(char *string)
 */

 debug_entry_t*
-debug_sprintf_event(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
 {
 va_list ap;
 int numargs,idx;
@@ -1027,8 +1027,6 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 debug_sprintf_entry_t *curr_event;
 debug_entry_t *active;

-if((!id) || (level > id->level))
-return NULL;
 if (!debug_active || !id->areas)
 return NULL;
 numargs=debug_count_numargs(string);
@@ -1050,14 +1048,14 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)

 return active;
 }
-EXPORT_SYMBOL(debug_sprintf_event);
+EXPORT_SYMBOL(__debug_sprintf_event);

 /*
 * debug_sprintf_exception:
 */

 debug_entry_t*
-debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
 {
 va_list ap;
 int numargs,idx;
@@ -1065,8 +1063,6 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 debug_sprintf_entry_t *curr_event;
 debug_entry_t *active;

-if((!id) || (level > id->level))
-return NULL;
 if (!debug_active || !id->areas)
 return NULL;

@@ -1089,7 +1085,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)

 return active;
 }
-EXPORT_SYMBOL(debug_sprintf_exception);
+EXPORT_SYMBOL(__debug_sprintf_exception);

 /*
 * debug_register_view:

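The renames above (debug_sprintf_event to __debug_sprintf_event, and likewise for the exception variant) implement the shortlog entry "s390/debug: avoid function call for debug_sprintf_*": the level check that these hunks drop from the function bodies moves into a header-side wrapper, so an event filtered out by its debug level no longer costs an out-of-line call. A sketch of that wrapper pattern (the real macro lives in arch/s390/include/asm/debug.h; treat the exact spelling here as an assumption):

	/* Check the level inline; only call the renamed out-of-line
	 * function when the event will actually be logged. */
	#define debug_sprintf_event(_id, _level, _fmt, ...)		\
	({								\
		debug_entry_t *__ret;					\
		debug_info_t *__id = _id;				\
		int __level = _level;					\
									\
		if ((!__id) || (__level > __id->level))			\
			__ret = NULL;					\
		else							\
			__ret = __debug_sprintf_event(__id, __level,	\
						      _fmt, ## __VA_ARGS__); \
		__ret;							\
	})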
@@ -191,7 +191,8 @@ void die(struct pt_regs *regs, const char *str)
 console_verbose();
 spin_lock_irq(&die_lock);
 bust_spinlocks(1);
-printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
+       regs->int_code >> 17, ++die_counter);
 #ifdef CONFIG_PREEMPT
 printk("PREEMPT ");
 #endif

@@ -12,7 +12,6 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pfn.h>
|
||||
@@ -490,8 +489,5 @@ void __init startup_init(void)
|
||||
detect_machine_facilities();
|
||||
setup_topology();
|
||||
sclp_early_detect();
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
|
||||
#endif
|
||||
lockdep_on();
|
||||
}
|
||||
|
@@ -53,7 +53,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 .macro TRACE_IRQS_ON
 #ifdef CONFIG_TRACE_IRQFLAGS
 basr %r2,%r0
-l %r1,BASED(.Lhardirqs_on)
+l %r1,BASED(.Lc_hardirqs_on)
 basr %r14,%r1 # call trace_hardirqs_on_caller
 #endif
 .endm
@@ -61,7 +61,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 .macro TRACE_IRQS_OFF
 #ifdef CONFIG_TRACE_IRQFLAGS
 basr %r2,%r0
-l %r1,BASED(.Lhardirqs_off)
+l %r1,BASED(.Lc_hardirqs_off)
 basr %r14,%r1 # call trace_hardirqs_off_caller
 #endif
 .endm
@@ -70,7 +70,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 #ifdef CONFIG_LOCKDEP
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
 jz .+10
-l %r1,BASED(.Llockdep_sys_exit)
+l %r1,BASED(.Lc_lockdep_sys_exit)
 basr %r14,%r1 # call lockdep_sys_exit
 #endif
 .endm
@@ -87,8 +87,8 @@ _PIF_WORK = (_PIF_PER_TRAP)
 tmh %r8,0x0001 # interrupting from user ?
 jnz 1f
 lr %r14,%r9
-sl %r14,BASED(.Lcritical_start)
-cl %r14,BASED(.Lcritical_length)
+sl %r14,BASED(.Lc_critical_start)
+cl %r14,BASED(.Lc_critical_length)
 jhe 0f
 la %r11,\savearea # inside critical section, do cleanup
 bras %r14,cleanup_critical
@@ -162,7 +162,7 @@ ENTRY(__switch_to)
 lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
 br %r14

-__critical_start:
+.L__critical_start:
 /*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
@@ -170,145 +170,145 @@ __critical_start:

 ENTRY(system_call)
 stpt __LC_SYNC_ENTER_TIMER
-sysc_stm:
+.Lsysc_stm:
 stm %r8,%r15,__LC_SAVE_AREA_SYNC
 l %r12,__LC_THREAD_INFO
 l %r13,__LC_SVC_NEW_PSW+4
 lhi %r14,_PIF_SYSCALL
-sysc_per:
+.Lsysc_per:
 l %r15,__LC_KERNEL_STACK
 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-sysc_vtime:
+.Lsysc_vtime:
 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 stm %r0,%r7,__PT_R0(%r11)
 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
 mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
 st %r14,__PT_FLAGS(%r11)
-sysc_do_svc:
+.Lsysc_do_svc:
 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
 lh %r8,__PT_INT_CODE+2(%r11)
 sla %r8,2 # shift and test for svc0
-jnz sysc_nr_ok
+jnz .Lsysc_nr_ok
 # svc 0: system call number in %r1
 cl %r1,BASED(.Lnr_syscalls)
-jnl sysc_nr_ok
+jnl .Lsysc_nr_ok
 sth %r1,__PT_INT_CODE+2(%r11)
 lr %r8,%r1
 sla %r8,2
-sysc_nr_ok:
+.Lsysc_nr_ok:
 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 st %r2,__PT_ORIG_GPR2(%r11)
 st %r7,STACK_FRAME_OVERHEAD(%r15)
 l %r9,0(%r8,%r10) # get system call addr.
 tm __TI_flags+3(%r12),_TIF_TRACE
-jnz sysc_tracesys
+jnz .Lsysc_tracesys
 basr %r14,%r9 # call sys_xxxx
 st %r2,__PT_R2(%r11) # store return value

-sysc_return:
+.Lsysc_return:
 LOCKDEP_SYS_EXIT
-sysc_tif:
+.Lsysc_tif:
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
-jno sysc_restore
+jno .Lsysc_restore
 tm __PT_FLAGS+3(%r11),_PIF_WORK
-jnz sysc_work
+jnz .Lsysc_work
 tm __TI_flags+3(%r12),_TIF_WORK
-jnz sysc_work # check for thread work
+jnz .Lsysc_work # check for thread work
 tm __LC_CPU_FLAGS+3,_CIF_WORK
-jnz sysc_work
-sysc_restore:
+jnz .Lsysc_work
+.Lsysc_restore:
 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
 stpt __LC_EXIT_TIMER
 lm %r0,%r15,__PT_R0(%r11)
 lpsw __LC_RETURN_PSW
-sysc_done:
+.Lsysc_done:

 #
 # One of the work bits is on. Find out which one.
 #
-sysc_work:
+.Lsysc_work:
 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
-jo sysc_mcck_pending
+jo .Lsysc_mcck_pending
 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
-jo sysc_reschedule
+jo .Lsysc_reschedule
 tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP
-jo sysc_singlestep
+jo .Lsysc_singlestep
 tm __TI_flags+3(%r12),_TIF_SIGPENDING
-jo sysc_sigpending
+jo .Lsysc_sigpending
 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-jo sysc_notify_resume
+jo .Lsysc_notify_resume
 tm __LC_CPU_FLAGS+3,_CIF_ASCE
-jo sysc_uaccess
-j sysc_return # beware of critical section cleanup
+jo .Lsysc_uaccess
+j .Lsysc_return # beware of critical section cleanup

 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
-sysc_reschedule:
-l %r1,BASED(.Lschedule)
-la %r14,BASED(sysc_return)
+.Lsysc_reschedule:
+l %r1,BASED(.Lc_schedule)
+la %r14,BASED(.Lsysc_return)
 br %r1 # call schedule

 #
 # _CIF_MCCK_PENDING is set, call handler
 #
-sysc_mcck_pending:
-l %r1,BASED(.Lhandle_mcck)
-la %r14,BASED(sysc_return)
+.Lsysc_mcck_pending:
+l %r1,BASED(.Lc_handle_mcck)
+la %r14,BASED(.Lsysc_return)
 br %r1 # TIF bit will be cleared by handler

 #
 # _CIF_ASCE is set, load user space asce
 #
-sysc_uaccess:
+.Lsysc_uaccess:
 ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
-j sysc_return
+j .Lsysc_return

 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-sysc_sigpending:
+.Lsysc_sigpending:
 lr %r2,%r11 # pass pointer to pt_regs
-l %r1,BASED(.Ldo_signal)
+l %r1,BASED(.Lc_do_signal)
 basr %r14,%r1 # call do_signal
 tm __PT_FLAGS+3(%r11),_PIF_SYSCALL
-jno sysc_return
+jno .Lsysc_return
 lm %r2,%r7,__PT_R2(%r11) # load svc arguments
 l %r10,__TI_sysc_table(%r12) # 31 bit system call table
 xr %r8,%r8 # svc 0 returns -ENOSYS
 clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
-jnl sysc_nr_ok # invalid svc number -> do svc 0
+jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
 lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
 sla %r8,2
-j sysc_nr_ok # restart svc
+j .Lsysc_nr_ok # restart svc

 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
-sysc_notify_resume:
+.Lsysc_notify_resume:
 lr %r2,%r11 # pass pointer to pt_regs
-l %r1,BASED(.Ldo_notify_resume)
-la %r14,BASED(sysc_return)
+l %r1,BASED(.Lc_do_notify_resume)
+la %r14,BASED(.Lsysc_return)
 br %r1 # call do_notify_resume

 #
 # _PIF_PER_TRAP is set, call do_per_trap
 #
-sysc_singlestep:
+.Lsysc_singlestep:
 ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
 lr %r2,%r11 # pass pointer to pt_regs
-l %r1,BASED(.Ldo_per_trap)
-la %r14,BASED(sysc_return)
+l %r1,BASED(.Lc_do_per_trap)
+la %r14,BASED(.Lsysc_return)
 br %r1 # call do_per_trap

 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 # and after the system call
 #
-sysc_tracesys:
-l %r1,BASED(.Ltrace_enter)
+.Lsysc_tracesys:
+l %r1,BASED(.Lc_trace_enter)
 lr %r2,%r11 # pass pointer to pt_regs
 la %r3,0
 xr %r0,%r0
@@ -316,22 +316,22 @@ sysc_tracesys:
 st %r0,__PT_R2(%r11)
 basr %r14,%r1 # call do_syscall_trace_enter
 cl %r2,BASED(.Lnr_syscalls)
-jnl sysc_tracenogo
+jnl .Lsysc_tracenogo
 lr %r8,%r2
 sll %r8,2
 l %r9,0(%r8,%r10)
-sysc_tracego:
+.Lsysc_tracego:
 lm %r3,%r7,__PT_R3(%r11)
 st %r7,STACK_FRAME_OVERHEAD(%r15)
 l %r2,__PT_ORIG_GPR2(%r11)
 basr %r14,%r9 # call sys_xxx
 st %r2,__PT_R2(%r11) # store return value
-sysc_tracenogo:
+.Lsysc_tracenogo:
 tm __TI_flags+3(%r12),_TIF_TRACE
-jz sysc_return
-l %r1,BASED(.Ltrace_exit)
+jz .Lsysc_return
+l %r1,BASED(.Lc_trace_exit)
 lr %r2,%r11 # pass pointer to pt_regs
-la %r14,BASED(sysc_return)
+la %r14,BASED(.Lsysc_return)
 br %r1 # call do_syscall_trace_exit

 #
@@ -341,18 +341,18 @@ ENTRY(ret_from_fork)
 la %r11,STACK_FRAME_OVERHEAD(%r15)
 l %r12,__LC_THREAD_INFO
 l %r13,__LC_SVC_NEW_PSW+4
-l %r1,BASED(.Lschedule_tail)
+l %r1,BASED(.Lc_schedule_tail)
 basr %r14,%r1 # call schedule_tail
 TRACE_IRQS_ON
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
-jne sysc_tracenogo
+jne .Lsysc_tracenogo
 # it's a kernel thread
 lm %r9,%r10,__PT_R9(%r11) # load gprs
 ENTRY(kernel_thread_starter)
 la %r2,0(%r10)
 basr %r14,%r9
-j sysc_tracenogo
+j .Lsysc_tracenogo

 /*
 * Program check handler routine
@@ -369,7 +369,7 @@ ENTRY(pgm_check_handler)
 tmh %r8,0x4000 # PER bit set in old PSW ?
 jnz 0f # -> enabled, can't be a double fault
 tm __LC_PGM_ILC+3,0x80 # check for per exception
-jnz pgm_svcper # -> single stepped svc
+jnz .Lpgm_svcper # -> single stepped svc
 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 j 2f
@@ -386,42 +386,42 @@ ENTRY(pgm_check_handler)
 jz 0f
 l %r1,__TI_task(%r12)
 tmh %r8,0x0001 # kernel per event ?
-jz pgm_kprobe
+jz .Lpgm_kprobe
 oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
 mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
 mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE
 mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
 0: REENABLE_IRQS
 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-l %r1,BASED(.Ljump_table)
+l %r1,BASED(.Lc_jump_table)
 la %r10,0x7f
 n %r10,__PT_INT_CODE(%r11)
-je sysc_return
+je .Lsysc_return
 sll %r10,2
 l %r1,0(%r10,%r1) # load address of handler routine
 lr %r2,%r11 # pass pointer to pt_regs
 basr %r14,%r1 # branch to interrupt-handler
-j sysc_return
+j .Lsysc_return

 #
 # PER event in supervisor state, must be kprobes
 #
-pgm_kprobe:
+.Lpgm_kprobe:
 REENABLE_IRQS
 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-l %r1,BASED(.Ldo_per_trap)
+l %r1,BASED(.Lc_do_per_trap)
 lr %r2,%r11 # pass pointer to pt_regs
 basr %r14,%r1 # call do_per_trap
-j sysc_return
+j .Lsysc_return

 #
 # single stepped system call
 #
-pgm_svcper:
+.Lpgm_svcper:
 mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
-mvc __LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
 lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
-lpsw __LC_RETURN_PSW # branch to sysc_per and enable irqs
+lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs

 /*
 * IO interrupt handler routine
@@ -435,9 +435,9 @@ ENTRY(io_int_handler)
 l %r13,__LC_SVC_NEW_PSW+4
 lm %r8,%r9,__LC_IO_OLD_PSW
 tmh %r8,0x0001 # interrupting from user ?
-jz io_skip
+jz .Lio_skip
 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
-io_skip:
+.Lio_skip:
 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 stm %r0,%r7,__PT_R0(%r11)
 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -446,35 +446,35 @@ io_skip:
 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 TRACE_IRQS_OFF
 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-io_loop:
-l %r1,BASED(.Ldo_IRQ)
+.Lio_loop:
+l %r1,BASED(.Lc_do_IRQ)
 lr %r2,%r11 # pass pointer to pt_regs
 lhi %r3,IO_INTERRUPT
 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
-jz io_call
+jz .Lio_call
 lhi %r3,THIN_INTERRUPT
-io_call:
+.Lio_call:
 basr %r14,%r1 # call do_IRQ
 tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
-jz io_return
+jz .Lio_return
 tpi 0
-jz io_return
+jz .Lio_return
 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
-j io_loop
-io_return:
+j .Lio_loop
+.Lio_return:
 LOCKDEP_SYS_EXIT
 TRACE_IRQS_ON
-io_tif:
+.Lio_tif:
 tm __TI_flags+3(%r12),_TIF_WORK
-jnz io_work # there is work to do (signals etc.)
+jnz .Lio_work # there is work to do (signals etc.)
 tm __LC_CPU_FLAGS+3,_CIF_WORK
-jnz io_work
-io_restore:
+jnz .Lio_work
+.Lio_restore:
 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
 stpt __LC_EXIT_TIMER
 lm %r0,%r15,__PT_R0(%r11)
 lpsw __LC_RETURN_PSW
-io_done:
+.Lio_done:

 #
 # There is work todo, find out in which context we have been interrupted:
@@ -483,15 +483,15 @@ io_done:
 # the preemption counter and if it is zero call preempt_schedule_irq
 # Before any work can be done, a switch to the kernel stack is required.
 #
-io_work:
+.Lio_work:
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
-jo io_work_user # yes -> do resched & signal
+jo .Lio_work_user # yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 # check for preemptive scheduling
 icm %r0,15,__TI_precount(%r12)
-jnz io_restore # preemption disabled
+jnz .Lio_restore # preemption disabled
 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
-jno io_restore
+jno .Lio_restore
 # switch to kernel stack
 l %r1,__PT_R15(%r11)
 ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -499,20 +499,20 @@ io_work:
 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
 la %r11,STACK_FRAME_OVERHEAD(%r1)
 lr %r15,%r1
-# TRACE_IRQS_ON already done at io_return, call
+# TRACE_IRQS_ON already done at .Lio_return, call
 # TRACE_IRQS_OFF to keep things symmetrical
 TRACE_IRQS_OFF
-l %r1,BASED(.Lpreempt_irq)
+l %r1,BASED(.Lc_preempt_irq)
 basr %r14,%r1 # call preempt_schedule_irq
-j io_return
+j .Lio_return
 #else
-j io_restore
+j .Lio_restore
 #endif

 #
 # Need to do work before returning to userspace, switch to kernel stack
 #
-io_work_user:
+.Lio_work_user:
 l %r1,__LC_KERNEL_STACK
 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -522,74 +522,74 @@ io_work_user:
 #
 # One of the work bits is on. Find out which one.
 #
-io_work_tif:
+.Lio_work_tif:
 tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
-jo io_mcck_pending
+jo .Lio_mcck_pending
 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
-jo io_reschedule
+jo .Lio_reschedule
 tm __TI_flags+3(%r12),_TIF_SIGPENDING
-jo io_sigpending
+jo .Lio_sigpending
 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-jo io_notify_resume
+jo .Lio_notify_resume
 tm __LC_CPU_FLAGS+3,_CIF_ASCE
-jo io_uaccess
-j io_return # beware of critical section cleanup
+jo .Lio_uaccess
+j .Lio_return # beware of critical section cleanup

 #
 # _CIF_MCCK_PENDING is set, call handler
 #
-io_mcck_pending:
-# TRACE_IRQS_ON already done at io_return
-l %r1,BASED(.Lhandle_mcck)
+.Lio_mcck_pending:
+# TRACE_IRQS_ON already done at .Lio_return
+l %r1,BASED(.Lc_handle_mcck)
 basr %r14,%r1 # TIF bit will be cleared by handler
 TRACE_IRQS_OFF
-j io_return
+j .Lio_return

 #
 # _CIF_ASCE is set, load user space asce
 #
-io_uaccess:
+.Lio_uaccess:
 ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
 lctl %c1,%c1,__LC_USER_ASCE # load primary asce
-j io_return
+j .Lio_return

 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
-io_reschedule:
-# TRACE_IRQS_ON already done at io_return
-l %r1,BASED(.Lschedule)
+.Lio_reschedule:
+# TRACE_IRQS_ON already done at .Lio_return
+l %r1,BASED(.Lc_schedule)
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 basr %r14,%r1 # call scheduler
 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
 TRACE_IRQS_OFF
-j io_return
+j .Lio_return

 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-io_sigpending:
-# TRACE_IRQS_ON already done at io_return
-l %r1,BASED(.Ldo_signal)
+.Lio_sigpending:
+# TRACE_IRQS_ON already done at .Lio_return
+l %r1,BASED(.Lc_do_signal)
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 lr %r2,%r11 # pass pointer to pt_regs
 basr %r14,%r1 # call do_signal
 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
 TRACE_IRQS_OFF
-j io_return
+j .Lio_return

 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-io_notify_resume:
-# TRACE_IRQS_ON already done at io_return
-l %r1,BASED(.Ldo_notify_resume)
+.Lio_notify_resume:
+# TRACE_IRQS_ON already done at .Lio_return
+l %r1,BASED(.Lc_do_notify_resume)
 ssm __LC_SVC_NEW_PSW # reenable interrupts
 lr %r2,%r11 # pass pointer to pt_regs
 basr %r14,%r1 # call do_notify_resume
 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
 TRACE_IRQS_OFF
-j io_return
+j .Lio_return

 /*
 * External interrupt handler routine
@@ -603,9 +603,9 @@ ENTRY(ext_int_handler)
 l %r13,__LC_SVC_NEW_PSW+4
 lm %r8,%r9,__LC_EXT_OLD_PSW
 tmh %r8,0x0001 # interrupting from user ?
-jz ext_skip
+jz .Lext_skip
 UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
-ext_skip:
+.Lext_skip:
 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 stm %r0,%r7,__PT_R0(%r11)
 mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -614,29 +614,29 @@ ext_skip:
 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 TRACE_IRQS_OFF
-l %r1,BASED(.Ldo_IRQ)
+l %r1,BASED(.Lc_do_IRQ)
 lr %r2,%r11 # pass pointer to pt_regs
 lhi %r3,EXT_INTERRUPT
 basr %r14,%r1 # call do_IRQ
-j io_return
+j .Lio_return

 /*
-* Load idle PSW. The second "half" of this function is in cleanup_idle.
+* Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
 ENTRY(psw_idle)
 st %r3,__SF_EMPTY(%r15)
 basr %r1,0
-la %r1,psw_idle_lpsw+4-.(%r1)
+la %r1,.Lpsw_idle_lpsw+4-.(%r1)
 st %r1,__SF_EMPTY+4(%r15)
 oi __SF_EMPTY+4(%r15),0x80
 stck __CLOCK_IDLE_ENTER(%r2)
 stpt __TIMER_IDLE_ENTER(%r2)
-psw_idle_lpsw:
+.Lpsw_idle_lpsw:
 lpsw __SF_EMPTY(%r15)
 br %r14
-psw_idle_end:
+.Lpsw_idle_end:

-__critical_end:
+.L__critical_end:

 /*
 * Machine check handler routines
@@ -650,7 +650,7 @@ ENTRY(mcck_int_handler)
 l %r13,__LC_SVC_NEW_PSW+4
 lm %r8,%r9,__LC_MCK_OLD_PSW
 tm __LC_MCCK_CODE,0x80 # system damage?
-jo mcck_panic # yes -> rest of mcck code invalid
+jo .Lmcck_panic # yes -> rest of mcck code invalid
 la %r14,__LC_CPU_TIMER_SAVE_AREA
 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
@@ -668,22 +668,22 @@ ENTRY(mcck_int_handler)
 2: spt 0(%r14)
 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
-jno mcck_panic # no -> skip cleanup critical
+jno .Lmcck_panic # no -> skip cleanup critical
 tm %r8,0x0001 # interrupting from user ?
-jz mcck_skip
+jz .Lmcck_skip
 UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
-mcck_skip:
+.Lmcck_skip:
 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
 stm %r0,%r7,__PT_R0(%r11)
 mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
 stm %r8,%r9,__PT_PSW(%r11)
 xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-l %r1,BASED(.Ldo_machine_check)
+l %r1,BASED(.Lc_do_machine_check)
 lr %r2,%r11 # pass pointer to pt_regs
 basr %r14,%r1 # call s390_do_machine_check
 tm __PT_PSW+1(%r11),0x01 # returning to user ?
-jno mcck_return
+jno .Lmcck_return
 l %r1,__LC_KERNEL_STACK # switch to kernel stack
 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -691,12 +691,12 @@ mcck_skip:
 lr %r15,%r1
 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
 tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
-jno mcck_return
+jno .Lmcck_return
 TRACE_IRQS_OFF
-l %r1,BASED(.Lhandle_mcck)
+l %r1,BASED(.Lc_handle_mcck)
 basr %r14,%r1 # call s390_handle_mcck
 TRACE_IRQS_ON
-mcck_return:
+.Lmcck_return:
 mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 jno 0f
@@ -706,15 +706,15 @@ mcck_return:
 0: lm %r0,%r15,__PT_R0(%r11)
 lpsw __LC_RETURN_MCCK_PSW

-mcck_panic:
+.Lmcck_panic:
 l %r14,__LC_PANIC_STACK
 slr %r14,%r15
 sra %r14,PAGE_SHIFT
 jz 0f
 l %r15,__LC_PANIC_STACK
-j mcck_skip
+j .Lmcck_skip
 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-j mcck_skip
+j .Lmcck_skip

 #
 # PSW restart interrupt handler
@@ -764,58 +764,58 @@ stack_overflow:
 1: .long kernel_stack_overflow
 #endif

-cleanup_table:
+.Lcleanup_table:
 .long system_call + 0x80000000
-.long sysc_do_svc + 0x80000000
-.long sysc_tif + 0x80000000
-.long sysc_restore + 0x80000000
-.long sysc_done + 0x80000000
-.long io_tif + 0x80000000
-.long io_restore + 0x80000000
-.long io_done + 0x80000000
+.long .Lsysc_do_svc + 0x80000000
+.long .Lsysc_tif + 0x80000000
+.long .Lsysc_restore + 0x80000000
+.long .Lsysc_done + 0x80000000
+.long .Lio_tif + 0x80000000
+.long .Lio_restore + 0x80000000
+.long .Lio_done + 0x80000000
 .long psw_idle + 0x80000000
-.long psw_idle_end + 0x80000000
+.long .Lpsw_idle_end + 0x80000000

 cleanup_critical:
-cl %r9,BASED(cleanup_table) # system_call
+cl %r9,BASED(.Lcleanup_table) # system_call
 jl 0f
-cl %r9,BASED(cleanup_table+4) # sysc_do_svc
-jl cleanup_system_call
-cl %r9,BASED(cleanup_table+8) # sysc_tif
+cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc
+jl .Lcleanup_system_call
+cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif
 jl 0f
-cl %r9,BASED(cleanup_table+12) # sysc_restore
-jl cleanup_sysc_tif
-cl %r9,BASED(cleanup_table+16) # sysc_done
-jl cleanup_sysc_restore
-cl %r9,BASED(cleanup_table+20) # io_tif
+cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore
+jl .Lcleanup_sysc_tif
+cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done
+jl .Lcleanup_sysc_restore
+cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif
 jl 0f
-cl %r9,BASED(cleanup_table+24) # io_restore
-jl cleanup_io_tif
-cl %r9,BASED(cleanup_table+28) # io_done
-jl cleanup_io_restore
-cl %r9,BASED(cleanup_table+32) # psw_idle
+cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore
+jl .Lcleanup_io_tif
+cl %r9,BASED(.Lcleanup_table+28) # .Lio_done
+jl .Lcleanup_io_restore
+cl %r9,BASED(.Lcleanup_table+32) # psw_idle
 jl 0f
-cl %r9,BASED(cleanup_table+36) # psw_idle_end
-jl cleanup_idle
+cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end
+jl .Lcleanup_idle
 0: br %r14

-cleanup_system_call:
+.Lcleanup_system_call:
 # check if stpt has been executed
-cl %r9,BASED(cleanup_system_call_insn)
+cl %r9,BASED(.Lcleanup_system_call_insn)
 jh 0f
 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
 chi %r11,__LC_SAVE_AREA_ASYNC
 je 0f
 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
 0: # check if stm has been executed
-cl %r9,BASED(cleanup_system_call_insn+4)
+cl %r9,BASED(.Lcleanup_system_call_insn+4)
 jh 0f
 mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
 0: # set up saved registers r12, and r13
 st %r12,16(%r11) # r12 thread-info pointer
 st %r13,20(%r11) # r13 literal-pool pointer
 # check if the user time calculation has been done
-cl %r9,BASED(cleanup_system_call_insn+8)
+cl %r9,BASED(.Lcleanup_system_call_insn+8)
 jh 0f
 l %r10,__LC_EXIT_TIMER
 l %r15,__LC_EXIT_TIMER+4
@@ -824,7 +824,7 @@ cleanup_system_call:
 st %r10,__LC_USER_TIMER
 st %r15,__LC_USER_TIMER+4
 0: # check if the system time calculation has been done
-cl %r9,BASED(cleanup_system_call_insn+12)
+cl %r9,BASED(.Lcleanup_system_call_insn+12)
 jh 0f
 l %r10,__LC_LAST_UPDATE_TIMER
 l %r15,__LC_LAST_UPDATE_TIMER+4
@@ -848,20 +848,20 @@ cleanup_system_call:
 # setup saved register 15
 st %r15,28(%r11) # r15 stack pointer
 # set new psw address and exit
-l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000
+l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
 br %r14
-cleanup_system_call_insn:
+.Lcleanup_system_call_insn:
 .long system_call + 0x80000000
-.long sysc_stm + 0x80000000
-.long sysc_vtime + 0x80000000 + 36
-.long sysc_vtime + 0x80000000 + 76
+.long .Lsysc_stm + 0x80000000
+.long .Lsysc_vtime + 0x80000000 + 36
+.long .Lsysc_vtime + 0x80000000 + 76

-cleanup_sysc_tif:
-l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000
+.Lcleanup_sysc_tif:
+l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
 br %r14

-cleanup_sysc_restore:
-cl %r9,BASED(cleanup_sysc_restore_insn)
+.Lcleanup_sysc_restore:
+cl %r9,BASED(.Lcleanup_sysc_restore_insn)
 jhe 0f
 l %r9,12(%r11) # get saved pointer to pt_regs
 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -869,15 +869,15 @@ cleanup_sysc_restore:
 lm %r0,%r7,__PT_R0(%r9)
 0: lm %r8,%r9,__LC_RETURN_PSW
 br %r14
-cleanup_sysc_restore_insn:
-.long sysc_done - 4 + 0x80000000
+.Lcleanup_sysc_restore_insn:
+.long .Lsysc_done - 4 + 0x80000000

-cleanup_io_tif:
-l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000
+.Lcleanup_io_tif:
+l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
 br %r14

-cleanup_io_restore:
-cl %r9,BASED(cleanup_io_restore_insn)
+.Lcleanup_io_restore:
+cl %r9,BASED(.Lcleanup_io_restore_insn)
 jhe 0f
 l %r9,12(%r11) # get saved r11 pointer to pt_regs
 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -885,10 +885,10 @@ cleanup_io_restore:
 lm %r0,%r7,__PT_R0(%r9)
 0: lm %r8,%r9,__LC_RETURN_PSW
 br %r14
-cleanup_io_restore_insn:
-.long io_done - 4 + 0x80000000
+.Lcleanup_io_restore_insn:
+.long .Lio_done - 4 + 0x80000000

-cleanup_idle:
+.Lcleanup_idle:
 # copy interrupt clock & cpu timer
 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -897,7 +897,7 @@ cleanup_idle:
 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
 0: # check if stck has been executed
-cl %r9,BASED(cleanup_idle_insn)
+cl %r9,BASED(.Lcleanup_idle_insn)
 jhe 1f
 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
@@ -913,12 +913,12 @@ cleanup_idle:
 stm %r9,%r10,__LC_SYSTEM_TIMER
 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 # prepare return psw
-n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits
+n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
 l %r9,24(%r11) # return from psw_idle
 br %r14
-cleanup_idle_insn:
-.long psw_idle_lpsw + 0x80000000
-cleanup_idle_wait:
+.Lcleanup_idle_insn:
+.long .Lpsw_idle_lpsw + 0x80000000
+.Lcleanup_idle_wait:
 .long 0xfcfdffff

/*
@@ -933,30 +933,30 @@ cleanup_idle_wait:
 /*
 * Symbol constants
 */
-.Ldo_machine_check: .long s390_do_machine_check
-.Lhandle_mcck: .long s390_handle_mcck
-.Ldo_IRQ: .long do_IRQ
-.Ldo_signal: .long do_signal
-.Ldo_notify_resume: .long do_notify_resume
-.Ldo_per_trap: .long do_per_trap
-.Ljump_table: .long pgm_check_table
-.Lschedule: .long schedule
+.Lc_do_machine_check: .long s390_do_machine_check
+.Lc_handle_mcck: .long s390_handle_mcck
+.Lc_do_IRQ: .long do_IRQ
+.Lc_do_signal: .long do_signal
+.Lc_do_notify_resume: .long do_notify_resume
+.Lc_do_per_trap: .long do_per_trap
+.Lc_jump_table: .long pgm_check_table
+.Lc_schedule: .long schedule
 #ifdef CONFIG_PREEMPT
-.Lpreempt_irq: .long preempt_schedule_irq
+.Lc_preempt_irq: .long preempt_schedule_irq
 #endif
-.Ltrace_enter: .long do_syscall_trace_enter
-.Ltrace_exit: .long do_syscall_trace_exit
-.Lschedule_tail: .long schedule_tail
-.Lsysc_per: .long sysc_per + 0x80000000
+.Lc_trace_enter: .long do_syscall_trace_enter
+.Lc_trace_exit: .long do_syscall_trace_exit
+.Lc_schedule_tail: .long schedule_tail
+.Lc_sysc_per: .long .Lsysc_per + 0x80000000
 #ifdef CONFIG_TRACE_IRQFLAGS
-.Lhardirqs_on: .long trace_hardirqs_on_caller
-.Lhardirqs_off: .long trace_hardirqs_off_caller
+.Lc_hardirqs_on: .long trace_hardirqs_on_caller
+.Lc_hardirqs_off: .long trace_hardirqs_off_caller
 #endif
 #ifdef CONFIG_LOCKDEP
-.Llockdep_sys_exit: .long lockdep_sys_exit
+.Lc_lockdep_sys_exit: .long lockdep_sys_exit
 #endif
-.Lcritical_start: .long __critical_start + 0x80000000
-.Lcritical_length: .long __critical_end - __critical_start
+.Lc_critical_start: .long .L__critical_start + 0x80000000
+.Lc_critical_length: .long .L__critical_end - .L__critical_start

 .section .rodata, "a"
 #define SYSCALL(esa,esame,emu) .long esa

@@ -74,4 +74,6 @@ struct old_sigaction;
 long sys_s390_personality(unsigned int personality);
 long sys_s390_runtime_instr(int command, int signum);

+long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
+long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
 #endif /* _ENTRY_H */

@@ -91,7 +91,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
|
||||
.if \reason==1
|
||||
# Some program interrupts are suppressing (e.g. protection).
|
||||
# We must also check the instruction after SIE in that case.
|
||||
# do_protection_exception will rewind to rewind_pad
|
||||
# do_protection_exception will rewind to .Lrewind_pad
|
||||
jh .+42
|
||||
.else
|
||||
jhe .+42
|
||||
@@ -192,7 +192,7 @@ ENTRY(__switch_to)
|
||||
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
|
||||
br %r14
|
||||
|
||||
__critical_start:
|
||||
.L__critical_start:
|
||||
/*
|
||||
* SVC interrupt handler routine. System calls are synchronous events and
|
||||
* are executed with interrupts enabled.
|
||||
@@ -200,15 +200,15 @@ __critical_start:
|
||||
|
||||
ENTRY(system_call)
|
||||
stpt __LC_SYNC_ENTER_TIMER
|
||||
sysc_stmg:
|
||||
.Lsysc_stmg:
|
||||
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
|
||||
lg %r10,__LC_LAST_BREAK
|
||||
lg %r12,__LC_THREAD_INFO
|
||||
lghi %r14,_PIF_SYSCALL
|
||||
sysc_per:
|
||||
.Lsysc_per:
|
||||
lg %r15,__LC_KERNEL_STACK
|
||||
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
|
||||
sysc_vtime:
|
||||
.Lsysc_vtime:
|
||||
UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
|
||||
LAST_BREAK %r13
|
||||
stmg %r0,%r7,__PT_R0(%r11)
|
||||
@@ -216,39 +216,39 @@ sysc_vtime:
|
||||
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
|
||||
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
|
||||
stg %r14,__PT_FLAGS(%r11)
|
||||
sysc_do_svc:
|
||||
.Lsysc_do_svc:
|
||||
lg %r10,__TI_sysc_table(%r12) # address of system call table
|
||||
llgh %r8,__PT_INT_CODE+2(%r11)
|
||||
slag %r8,%r8,2 # shift and test for svc 0
|
||||
jnz sysc_nr_ok
|
||||
jnz .Lsysc_nr_ok
|
||||
# svc 0: system call number in %r1
|
||||
llgfr %r1,%r1 # clear high word in r1
|
||||
cghi %r1,NR_syscalls
|
||||
jnl sysc_nr_ok
|
||||
jnl .Lsysc_nr_ok
|
||||
sth %r1,__PT_INT_CODE+2(%r11)
|
||||
slag %r8,%r1,2
|
||||
sysc_nr_ok:
|
||||
.Lsysc_nr_ok:
|
||||
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
|
||||
stg %r2,__PT_ORIG_GPR2(%r11)
|
||||
stg %r7,STACK_FRAME_OVERHEAD(%r15)
|
||||
lgf %r9,0(%r8,%r10) # get system call add.
|
||||
tm __TI_flags+7(%r12),_TIF_TRACE
|
||||
jnz sysc_tracesys
|
||||
jnz .Lsysc_tracesys
|
||||
basr %r14,%r9 # call sys_xxxx
|
||||
stg %r2,__PT_R2(%r11) # store return value
|
||||
|
||||
sysc_return:
|
||||
.Lsysc_return:
|
||||
LOCKDEP_SYS_EXIT
|
||||
sysc_tif:
|
||||
.Lsysc_tif:
|
||||
tm __PT_PSW+1(%r11),0x01 # returning to user ?
|
||||
jno sysc_restore
|
||||
jno .Lsysc_restore
|
||||
tm __PT_FLAGS+7(%r11),_PIF_WORK
|
||||
jnz sysc_work
|
||||
jnz .Lsysc_work
|
||||
tm __TI_flags+7(%r12),_TIF_WORK
|
||||
jnz sysc_work # check for work
|
||||
jnz .Lsysc_work # check for work
|
||||
tm __LC_CPU_FLAGS+7,_CIF_WORK
|
||||
jnz sysc_work
|
||||
sysc_restore:
|
||||
jnz .Lsysc_work
|
||||
.Lsysc_restore:
|
||||
lg %r14,__LC_VDSO_PER_CPU
|
||||
lmg %r0,%r10,__PT_R0(%r11)
|
||||
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
|
||||
@@ -256,101 +256,101 @@ sysc_restore:
|
||||
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
|
||||
lmg %r11,%r15,__PT_R11(%r11)
|
||||
lpswe __LC_RETURN_PSW
|
||||
sysc_done:
|
||||
.Lsysc_done:
|
||||
|
||||
#
|
||||
# One of the work bits is on. Find out which one.
|
||||
#
|
||||
sysc_work:
|
||||
.Lsysc_work:
|
||||
tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
|
||||
jo sysc_mcck_pending
|
||||
jo .Lsysc_mcck_pending
|
||||
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
|
||||
jo sysc_reschedule
|
||||
jo .Lsysc_reschedule
|
||||
#ifdef CONFIG_UPROBES
|
||||
tm __TI_flags+7(%r12),_TIF_UPROBE
|
||||
jo sysc_uprobe_notify
|
||||
jo .Lsysc_uprobe_notify
|
||||
#endif
|
||||
tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
|
||||
jo sysc_singlestep
|
||||
jo .Lsysc_singlestep
|
||||
tm __TI_flags+7(%r12),_TIF_SIGPENDING
|
||||
jo sysc_sigpending
|
||||
jo .Lsysc_sigpending
|
||||
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
|
||||
jo sysc_notify_resume
|
||||
jo .Lsysc_notify_resume
|
||||
tm __LC_CPU_FLAGS+7,_CIF_ASCE
|
||||
jo sysc_uaccess
|
||||
j sysc_return # beware of critical section cleanup
|
||||
jo .Lsysc_uaccess
|
||||
j .Lsysc_return # beware of critical section cleanup
|
||||
|
||||
#
|
||||
# _TIF_NEED_RESCHED is set, call schedule
|
||||
#
|
||||
sysc_reschedule:
|
||||
larl %r14,sysc_return
|
||||
.Lsysc_reschedule:
|
||||
larl %r14,.Lsysc_return
|
||||
jg schedule
|
||||
|
||||
#
|
||||
# _CIF_MCCK_PENDING is set, call handler
|
||||
#
|
||||
sysc_mcck_pending:
|
||||
larl %r14,sysc_return
|
||||
.Lsysc_mcck_pending:
|
||||
larl %r14,.Lsysc_return
|
||||
jg s390_handle_mcck # TIF bit will be cleared by handler
|
||||
|
||||
#
|
||||
# _CIF_ASCE is set, load user space asce
|
||||
#
|
||||
sysc_uaccess:
|
||||
.Lsysc_uaccess:
|
||||
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
|
||||
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
|
||||
j sysc_return
|
||||
j .Lsysc_return
|
||||
|
||||
#
|
||||
# _TIF_SIGPENDING is set, call do_signal
|
||||
#
|
||||
sysc_sigpending:
|
||||
.Lsysc_sigpending:
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
brasl %r14,do_signal
|
||||
tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
|
||||
jno sysc_return
|
||||
jno .Lsysc_return
|
||||
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
|
||||
lg %r10,__TI_sysc_table(%r12) # address of system call table
|
||||
lghi %r8,0 # svc 0 returns -ENOSYS
|
||||
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
|
||||
cghi %r1,NR_syscalls
|
||||
jnl sysc_nr_ok # invalid svc number -> do svc 0
|
||||
jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
|
||||
slag %r8,%r1,2
|
||||
j sysc_nr_ok # restart svc
|
||||
j .Lsysc_nr_ok # restart svc
|
||||
|
||||
#
|
||||
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
|
||||
#
|
||||
sysc_notify_resume:
|
||||
.Lsysc_notify_resume:
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
larl %r14,sysc_return
|
||||
larl %r14,.Lsysc_return
|
||||
jg do_notify_resume
|
||||
|
||||
#
|
||||
# _TIF_UPROBE is set, call uprobe_notify_resume
|
||||
#
|
||||
#ifdef CONFIG_UPROBES
|
||||
sysc_uprobe_notify:
|
||||
.Lsysc_uprobe_notify:
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
larl %r14,sysc_return
|
||||
larl %r14,.Lsysc_return
|
||||
jg uprobe_notify_resume
|
||||
#endif
|
||||
|
||||
#
|
||||
# _PIF_PER_TRAP is set, call do_per_trap
|
||||
#
|
||||
sysc_singlestep:
|
||||
.Lsysc_singlestep:
|
||||
ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
larl %r14,sysc_return
|
||||
larl %r14,.Lsysc_return
|
||||
jg do_per_trap
|
||||
|
||||
#
|
||||
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
|
||||
# and after the system call
|
||||
#
|
||||
sysc_tracesys:
|
||||
.Lsysc_tracesys:
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
la %r3,0
|
||||
llgh %r0,__PT_INT_CODE+2(%r11)
|
||||
@@ -358,20 +358,20 @@ sysc_tracesys:
|
||||
brasl %r14,do_syscall_trace_enter
|
||||
lghi %r0,NR_syscalls
|
||||
clgr %r0,%r2
|
||||
jnh sysc_tracenogo
|
||||
jnh .Lsysc_tracenogo
|
||||
sllg %r8,%r2,2
|
||||
lgf %r9,0(%r8,%r10)
|
||||
sysc_tracego:
|
||||
.Lsysc_tracego:
|
||||
lmg %r3,%r7,__PT_R3(%r11)
|
||||
stg %r7,STACK_FRAME_OVERHEAD(%r15)
|
||||
lg %r2,__PT_ORIG_GPR2(%r11)
|
||||
basr %r14,%r9 # call sys_xxx
|
||||
stg %r2,__PT_R2(%r11) # store return value
|
||||
sysc_tracenogo:
|
||||
.Lsysc_tracenogo:
|
||||
tm __TI_flags+7(%r12),_TIF_TRACE
|
||||
jz sysc_return
|
||||
jz .Lsysc_return
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
larl %r14,sysc_return
|
||||
larl %r14,.Lsysc_return
|
||||
jg do_syscall_trace_exit
|
||||
|
||||
#
|
||||
@@ -384,13 +384,13 @@ ENTRY(ret_from_fork)
|
||||
TRACE_IRQS_ON
|
||||
ssm __LC_SVC_NEW_PSW # reenable interrupts
|
||||
tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
|
||||
jne sysc_tracenogo
|
||||
jne .Lsysc_tracenogo
|
||||
# it's a kernel thread
|
||||
lmg %r9,%r10,__PT_R9(%r11) # load gprs
|
||||
ENTRY(kernel_thread_starter)
|
||||
la %r2,0(%r10)
|
||||
basr %r14,%r9
|
||||
j sysc_tracenogo
|
||||
j .Lsysc_tracenogo
|
||||
|
||||
/*
|
||||
* Program check handler routine
|
||||
@@ -409,7 +409,7 @@ ENTRY(pgm_check_handler)
|
||||
tmhh %r8,0x4000 # PER bit set in old PSW ?
|
||||
jnz 0f # -> enabled, can't be a double fault
|
||||
tm __LC_PGM_ILC+3,0x80 # check for per exception
|
||||
jnz pgm_svcper # -> single stepped svc
|
||||
jnz .Lpgm_svcper # -> single stepped svc
|
||||
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
|
||||
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
|
||||
j 2f
|
||||
@@ -432,7 +432,7 @@ ENTRY(pgm_check_handler)
|
||||
tm __LC_PGM_ILC+3,0x80 # check for per exception
|
||||
jz 0f
|
||||
tmhh %r8,0x0001 # kernel per event ?
|
||||
jz pgm_kprobe
|
||||
jz .Lpgm_kprobe
|
||||
oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
|
||||
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
|
||||
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
|
||||
@@ -443,31 +443,31 @@ ENTRY(pgm_check_handler)
|
||||
llgh %r10,__PT_INT_CODE+2(%r11)
|
||||
nill %r10,0x007f
|
||||
sll %r10,2
|
||||
je sysc_return
|
||||
je .Lsysc_return
|
||||
lgf %r1,0(%r10,%r1) # load address of handler routine
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
basr %r14,%r1 # branch to interrupt-handler
|
||||
j sysc_return
|
||||
j .Lsysc_return
|
||||
|
||||
#
|
||||
# PER event in supervisor state, must be kprobes
|
||||
#
|
||||
pgm_kprobe:
|
||||
.Lpgm_kprobe:
|
||||
REENABLE_IRQS
|
||||
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
brasl %r14,do_per_trap
|
||||
j sysc_return
|
||||
j .Lsysc_return
|
||||
|
||||
#
|
||||
# single stepped system call
|
||||
#
|
||||
pgm_svcper:
|
||||
.Lpgm_svcper:
|
||||
mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
|
||||
larl %r14,sysc_per
|
||||
larl %r14,.Lsysc_per
|
||||
stg %r14,__LC_RETURN_PSW+8
|
||||
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
|
||||
lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs
|
||||
lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
|
||||
|
||||
/*
|
||||
* IO interrupt handler routine
|
||||
@@ -483,10 +483,10 @@ ENTRY(io_int_handler)
|
||||
HANDLE_SIE_INTERCEPT %r14,2
|
||||
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
|
||||
tmhh %r8,0x0001 # interrupting from user?
|
||||
jz io_skip
|
||||
jz .Lio_skip
|
||||
UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
|
||||
LAST_BREAK %r14
|
||||
io_skip:
|
||||
.Lio_skip:
|
||||
stmg %r0,%r7,__PT_R0(%r11)
|
||||
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
|
||||
stmg %r8,%r9,__PT_PSW(%r11)
|
||||
@@ -494,29 +494,29 @@ io_skip:
|
||||
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
|
||||
TRACE_IRQS_OFF
|
||||
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
|
||||
io_loop:
|
||||
.Lio_loop:
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
lghi %r3,IO_INTERRUPT
|
||||
tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
|
||||
jz io_call
|
||||
jz .Lio_call
|
||||
lghi %r3,THIN_INTERRUPT
|
||||
io_call:
|
||||
.Lio_call:
|
||||
brasl %r14,do_IRQ
|
||||
tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
|
||||
jz io_return
|
||||
jz .Lio_return
|
||||
tpi 0
|
||||
jz io_return
|
||||
jz .Lio_return
|
||||
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
|
||||
j io_loop
|
||||
io_return:
|
||||
j .Lio_loop
|
||||
.Lio_return:
|
||||
LOCKDEP_SYS_EXIT
|
||||
TRACE_IRQS_ON
|
||||
io_tif:
|
||||
.Lio_tif:
|
||||
tm __TI_flags+7(%r12),_TIF_WORK
|
||||
jnz io_work # there is work to do (signals etc.)
|
||||
jnz .Lio_work # there is work to do (signals etc.)
|
||||
tm __LC_CPU_FLAGS+7,_CIF_WORK
|
||||
jnz io_work
|
||||
io_restore:
|
||||
jnz .Lio_work
|
||||
.Lio_restore:
|
||||
lg %r14,__LC_VDSO_PER_CPU
|
||||
lmg %r0,%r10,__PT_R0(%r11)
|
||||
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
|
||||
@@ -524,7 +524,7 @@ io_restore:
|
||||
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
|
||||
lmg %r11,%r15,__PT_R11(%r11)
|
||||
lpswe __LC_RETURN_PSW
|
||||
io_done:
|
||||
.Lio_done:
|
||||
|
||||
#
|
||||
# There is work todo, find out in which context we have been interrupted:
|
||||
@@ -535,15 +535,15 @@ io_done:
|
||||
# the preemption counter and if it is zero call preempt_schedule_irq
|
||||
# Before any work can be done, a switch to the kernel stack is required.
|
||||
#
|
||||
io_work:
|
||||
.Lio_work:
|
||||
tm __PT_PSW+1(%r11),0x01 # returning to user ?
|
||||
jo io_work_user # yes -> do resched & signal
|
||||
jo .Lio_work_user # yes -> do resched & signal
|
||||
#ifdef CONFIG_PREEMPT
|
||||
# check for preemptive scheduling
|
||||
icm %r0,15,__TI_precount(%r12)
|
||||
jnz io_restore # preemption is disabled
|
||||
jnz .Lio_restore # preemption is disabled
|
||||
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
|
||||
jno io_restore
|
||||
jno .Lio_restore
|
||||
# switch to kernel stack
|
||||
lg %r1,__PT_R15(%r11)
|
||||
aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
|
||||
@@ -551,19 +551,19 @@ io_work:
|
||||
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
|
||||
la %r11,STACK_FRAME_OVERHEAD(%r1)
|
||||
lgr %r15,%r1
|
||||
# TRACE_IRQS_ON already done at io_return, call
|
||||
# TRACE_IRQS_ON already done at .Lio_return, call
|
||||
# TRACE_IRQS_OFF to keep things symmetrical
|
||||
TRACE_IRQS_OFF
|
||||
brasl %r14,preempt_schedule_irq
|
||||
j io_return
|
||||
j .Lio_return
|
||||
#else
|
||||
j io_restore
|
||||
j .Lio_restore
|
||||
#endif
|
||||
|
||||
#
|
||||
# Need to do work before returning to userspace, switch to kernel stack
|
||||
#
|
||||
io_work_user:
|
||||
.Lio_work_user:
|
||||
lg %r1,__LC_KERNEL_STACK
|
||||
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
|
||||
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
|
||||
@@ -573,70 +573,70 @@ io_work_user:
|
||||
#
|
||||
# One of the work bits is on. Find out which one.
|
||||
#
|
||||
io_work_tif:
|
||||
.Lio_work_tif:
|
||||
tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
|
||||
jo io_mcck_pending
|
||||
jo .Lio_mcck_pending
|
||||
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
|
||||
jo io_reschedule
|
||||
jo .Lio_reschedule
|
||||
tm __TI_flags+7(%r12),_TIF_SIGPENDING
|
||||
jo io_sigpending
|
||||
jo .Lio_sigpending
|
||||
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
|
||||
jo io_notify_resume
|
||||
jo .Lio_notify_resume
|
||||
tm __LC_CPU_FLAGS+7,_CIF_ASCE
|
||||
jo io_uaccess
|
||||
j io_return # beware of critical section cleanup
|
||||
jo .Lio_uaccess
|
||||
j .Lio_return # beware of critical section cleanup
|
||||
|
||||
#
|
||||
# _CIF_MCCK_PENDING is set, call handler
|
||||
#
|
||||
io_mcck_pending:
|
||||
# TRACE_IRQS_ON already done at io_return
|
||||
.Lio_mcck_pending:
|
||||
# TRACE_IRQS_ON already done at .Lio_return
|
||||
brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
|
||||
TRACE_IRQS_OFF
|
||||
j io_return
|
||||
j .Lio_return
|
||||
|
||||
#
|
||||
# _CIF_ASCE is set, load user space asce
|
||||
#
|
||||
io_uaccess:
|
||||
.Lio_uaccess:
|
||||
ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
|
||||
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
|
||||
j io_return
|
||||
j .Lio_return
|
||||
|
||||
#
|
||||
# _TIF_NEED_RESCHED is set, call schedule
|
||||
#
|
||||
io_reschedule:
|
||||
# TRACE_IRQS_ON already done at io_return
|
||||
.Lio_reschedule:
|
||||
# TRACE_IRQS_ON already done at .Lio_return
|
||||
ssm __LC_SVC_NEW_PSW # reenable interrupts
|
||||
brasl %r14,schedule # call scheduler
|
||||
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
|
||||
TRACE_IRQS_OFF
|
||||
j io_return
|
||||
j .Lio_return
|
||||
|
||||
#
|
||||
# _TIF_SIGPENDING or is set, call do_signal
|
||||
#
|
||||
io_sigpending:
|
||||
# TRACE_IRQS_ON already done at io_return
|
||||
.Lio_sigpending:
|
||||
# TRACE_IRQS_ON already done at .Lio_return
|
||||
ssm __LC_SVC_NEW_PSW # reenable interrupts
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
brasl %r14,do_signal
|
||||
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
|
||||
TRACE_IRQS_OFF
|
||||
j io_return
|
||||
j .Lio_return
|
||||
|
||||
#
|
||||
# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
|
||||
#
|
||||
io_notify_resume:
|
||||
# TRACE_IRQS_ON already done at io_return
|
||||
.Lio_notify_resume:
|
||||
# TRACE_IRQS_ON already done at .Lio_return
|
||||
ssm __LC_SVC_NEW_PSW # reenable interrupts
|
||||
lgr %r2,%r11 # pass pointer to pt_regs
|
||||
brasl %r14,do_notify_resume
|
||||
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
|
||||
TRACE_IRQS_OFF
|
||||
j io_return
|
||||
j .Lio_return
|
||||
|
||||
/*
|
||||
* External interrupt handler routine
|
||||
@@ -652,10 +652,10 @@ ENTRY(ext_int_handler)
	HANDLE_SIE_INTERCEPT %r14,3
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
	tmhh	%r8,0x0001		# interrupting from user ?
	jz	ext_skip
	jz	.Lext_skip
	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
	LAST_BREAK %r14
ext_skip:
.Lext_skip:
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
@@ -669,23 +669,23 @@ ext_skip:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	io_return
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in cleanup_idle.
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_lpsw+4
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
psw_idle_lpsw:
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	br	%r14
psw_idle_end:
.Lpsw_idle_end:

__critical_end:
.L__critical_end:

/*
 * Machine check handler routines
@@ -701,7 +701,7 @@ ENTRY(mcck_int_handler)
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	HANDLE_SIE_INTERCEPT %r14,4
	tm	__LC_MCCK_CODE,0x80	# system damage?
	jo	mcck_panic		# yes -> rest of mcck code invalid
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
@@ -719,13 +719,13 @@ ENTRY(mcck_int_handler)
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
	jno	mcck_panic		# no -> skip cleanup critical
	jno	.Lmcck_panic		# no -> skip cleanup critical
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
	tm	%r8,0x0001		# interrupting from user ?
	jz	mcck_skip
	jz	.Lmcck_skip
	UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
	LAST_BREAK %r14
mcck_skip:
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
@@ -735,7 +735,7 @@ mcck_skip:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	mcck_return
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
@@ -743,11 +743,11 @@ mcck_skip:
	lgr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	tm	__LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	jno	mcck_return
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
mcck_return:
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
@@ -758,14 +758,14 @@ mcck_return:
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW

mcck_panic:
.Lmcck_panic:
	lg	%r14,__LC_PANIC_STACK
	slgr	%r14,%r15
	srag	%r14,%r14,PAGE_SHIFT
	jz	0f
	lg	%r15,__LC_PANIC_STACK
0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	mcck_skip
	j	.Lmcck_skip

#
# PSW restart interrupt handler
@@ -815,69 +815,69 @@ stack_overflow:
#endif

	.align	8
cleanup_table:
.Lcleanup_table:
	.quad	system_call
	.quad	sysc_do_svc
	.quad	sysc_tif
	.quad	sysc_restore
	.quad	sysc_done
	.quad	io_tif
	.quad	io_restore
	.quad	io_done
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	psw_idle_end
	.quad	.Lpsw_idle_end

cleanup_critical:
	clg	%r9,BASED(cleanup_table)	# system_call
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(cleanup_table+8)	# sysc_do_svc
	jl	cleanup_system_call
	clg	%r9,BASED(cleanup_table+16)	# sysc_tif
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(cleanup_table+24)	# sysc_restore
	jl	cleanup_sysc_tif
	clg	%r9,BASED(cleanup_table+32)	# sysc_done
	jl	cleanup_sysc_restore
	clg	%r9,BASED(cleanup_table+40)	# io_tif
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(cleanup_table+48)	# io_restore
	jl	cleanup_io_tif
	clg	%r9,BASED(cleanup_table+56)	# io_done
	jl	cleanup_io_restore
	clg	%r9,BASED(cleanup_table+64)	# psw_idle
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(cleanup_table+72)	# psw_idle_end
	jl	cleanup_idle
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
0:	br	%r14
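
[editor's note] cleanup_critical above classifies the interrupted address in
%r9 against .Lcleanup_table and dispatches to the matching cleanup handler.
A rough, hypothetical C rendering of that dispatch, for orientation only
(the stub functions stand in for the assembly cleanup routines):

#include <stdio.h>

static void cleanup_system_call(void)  { puts("cleanup_system_call"); }
static void cleanup_sysc_tif(void)     { puts("cleanup_sysc_tif"); }
static void cleanup_sysc_restore(void) { puts("cleanup_sysc_restore"); }
static void cleanup_io_tif(void)       { puts("cleanup_io_tif"); }
static void cleanup_io_restore(void)   { puts("cleanup_io_restore"); }
static void cleanup_idle(void)         { puts("cleanup_idle"); }

/* tbl[0..9] mirrors .Lcleanup_table: system_call, .Lsysc_do_svc,
 * .Lsysc_tif, .Lsysc_restore, .Lsysc_done, .Lio_tif, .Lio_restore,
 * .Lio_done, psw_idle, .Lpsw_idle_end */
static void cleanup_critical_sketch(unsigned long ip, const unsigned long tbl[10])
{
	if (ip < tbl[0] || ip >= tbl[9])
		return;				/* outside any critical range */
	if (ip < tbl[1])
		cleanup_system_call();		/* [system_call, .Lsysc_do_svc) */
	else if (ip < tbl[2])
		;				/* [.Lsysc_do_svc, .Lsysc_tif): nothing */
	else if (ip < tbl[3])
		cleanup_sysc_tif();		/* [.Lsysc_tif, .Lsysc_restore) */
	else if (ip < tbl[4])
		cleanup_sysc_restore();		/* [.Lsysc_restore, .Lsysc_done) */
	else if (ip < tbl[5])
		;				/* [.Lsysc_done, .Lio_tif): nothing */
	else if (ip < tbl[6])
		cleanup_io_tif();		/* [.Lio_tif, .Lio_restore) */
	else if (ip < tbl[7])
		cleanup_io_restore();		/* [.Lio_restore, .Lio_done) */
	else if (ip < tbl[8])
		;				/* [.Lio_done, psw_idle): nothing */
	else
		cleanup_idle();			/* [psw_idle, .Lpsw_idle_end) */
}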


cleanup_system_call:
.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(cleanup_system_call_insn)
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(cleanup_system_call_insn+8)
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(cleanup_system_call_insn+16)
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved registers r10 and r12
	stg	%r10,16(%r11)		# r10 last break
	stg	%r12,32(%r11)		# r12 thread-info pointer
0:	# check if the user time update has been done
	clg	%r9,BASED(cleanup_system_call_insn+24)
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(cleanup_system_call_insn+32)
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
@@ -904,21 +904,21 @@ cleanup_system_call:
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,sysc_do_svc
	larl	%r9,.Lsysc_do_svc
	br	%r14
cleanup_system_call_insn:
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	sysc_stmg
	.quad	sysc_per
	.quad	sysc_vtime+18
	.quad	sysc_vtime+42
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+18
	.quad	.Lsysc_vtime+42

cleanup_sysc_tif:
	larl	%r9,sysc_tif
.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	br	%r14

cleanup_sysc_restore:
	clg	%r9,BASED(cleanup_sysc_restore_insn)
.Lcleanup_sysc_restore:
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	je	0f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
@@ -926,15 +926,15 @@ cleanup_sysc_restore:
	lmg	%r0,%r7,__PT_R0(%r9)
0:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
cleanup_sysc_restore_insn:
	.quad	sysc_done - 4
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_done - 4

cleanup_io_tif:
	larl	%r9,io_tif
.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	br	%r14

cleanup_io_restore:
	clg	%r9,BASED(cleanup_io_restore_insn)
.Lcleanup_io_restore:
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	je	0f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
@@ -942,10 +942,10 @@ cleanup_io_restore:
	lmg	%r0,%r7,__PT_R0(%r9)
0:	lmg	%r8,%r9,__LC_RETURN_PSW
	br	%r14
cleanup_io_restore_insn:
	.quad	io_done - 4
.Lcleanup_io_restore_insn:
	.quad	.Lio_done - 4

cleanup_idle:
.Lcleanup_idle:
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -954,7 +954,7 @@ cleanup_idle:
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(cleanup_idle_insn)
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
@@ -973,17 +973,17 @@ cleanup_idle:
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	br	%r14
cleanup_idle_insn:
	.quad	psw_idle_lpsw
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	__critical_start
	.quad	.L__critical_start
.Lcritical_length:
	.quad	__critical_end - __critical_start
	.quad	.L__critical_end - .L__critical_start


#if IS_ENABLED(CONFIG_KVM)
@@ -1000,25 +1000,25 @@ ENTRY(sie64a)
	lmg	%r0,%r13,0(%r3)		# load guest gprs 0-13
	lg	%r14,__LC_GMAP		# get gmap pointer
	ltgr	%r14,%r14
	jz	sie_gmap
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
sie_gmap:
.Lsie_gmap:
	lg	%r14,__SF_EMPTY(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1	# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),1	# last exit...
	jnz	sie_done
	jnz	.Lsie_done
	LPP	__SF_EMPTY(%r15)	# set guest id
	sie	0(%r14)
sie_done:
.Lsie_done:
	LPP	__SF_EMPTY+16(%r15)	# set host id
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE	# load primary asce
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
# instructions between sie64a and sie_done should not cause program
# instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
# See also HANDLE_SIE_INTERCEPT
rewind_pad:
.Lrewind_pad:
	nop	0
	.globl sie_exit
sie_exit:
@@ -1027,19 +1027,19 @@ sie_exit:
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
	br	%r14
sie_fault:
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
	j	sie_exit

	.align	8
.Lsie_critical:
	.quad	sie_gmap
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	sie_done - sie_gmap
	.quad	.Lsie_done - .Lsie_gmap

EX_TABLE(rewind_pad,sie_fault)
EX_TABLE(sie_exit,sie_fault)
EX_TABLE(.Lrewind_pad,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
#endif

	.section .rodata, "a"

@@ -7,6 +7,7 @@
 * Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
@@ -15,60 +16,39 @@
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include "entry.h"

void mcount_replace_code(void);
void ftrace_disable_code(void);
void ftrace_enable_insn(void);

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The complete mcount block initially gets replaced
 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop
 * only patch the jg/lg instruction within the block.
 * Note: we do not patch the first instruction to an unconditional branch,
 * since that would break kprobes/jprobes. It is easier to leave the larl
 * instruction in and only modify the second instruction.
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 *	larl	%r0,.+24		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 *	br	%r1			# offset 12
 *	brcl	0,0			# offset 14
 *	brc	0,0			# offset 20
 * >	brasl	%r0,ftrace_caller	# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to ftrace caller, but need a
 * trampoline (ftrace_plt), which clobbers also r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 *	larl	%r0,.+24		# offset 0
 * >	jg	.+18			# offset 6
 *	br	%r1			# offset 12
 *	brcl	0,0			# offset 14
 *	brc	0,0			# offset 20
 * The disabled ftrace code block looks like this:
 * >	jg	.+24			# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
asm(
	"	.align	4\n"
	"mcount_replace_code:\n"
	"	larl	%r0,0f\n"
	"ftrace_disable_code:\n"
	"	jg	0f\n"
	"	br	%r1\n"
	"	brcl	0,0\n"
	"	brc	0,0\n"
	"0:\n"
	"	.align	4\n"
	"ftrace_enable_insn:\n"
	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");

#define MCOUNT_BLOCK_SIZE 24
#define MCOUNT_INSN_OFFSET 6
#define FTRACE_INSN_SIZE 6
unsigned long ftrace_plt;
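
[editor's note] With the whole-block replacement gone, ftrace_make_nop() and
ftrace_make_call() below both build the 6-byte branch via small helpers that
this hunk does not show. A sketch of their likely shape, inferred from the
patching code and the comment block above rather than quoted from the real
header:

/* Inferred sketch: a 6-byte s390 relative-long branch, split into a
 * 2-byte opcode and a 4-byte displacement counted in halfwords. */
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
	insn->opc  = 0xc0f4;			/* jg, i.e. brcl 15,... */
	insn->disp = MCOUNT_INSN_SIZE / 2;	/* jg .+24: skip the mcount block */
}

static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
					     unsigned long ip)
{
	insn->opc  = 0xc005;			/* brasl %r0,... */
	insn->disp = (FTRACE_ADDR - ip) / 2;	/* relative target, in halfwords */
}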

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
@@ -79,24 +59,62 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Initial replacement of the whole mcount block */
	if (addr == MCOUNT_ADDR) {
		if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET,
				       mcount_replace_code,
				       MCOUNT_BLOCK_SIZE))
			return -EPERM;
		return 0;
	struct ftrace_insn insn;
	unsigned short op;
	void *from, *to;
	size_t size;

	ftrace_generate_nop_insn(&insn);
	size = sizeof(insn);
	from = &insn;
	to = (void *) rec->ip;
	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
		return -EFAULT;
	/*
	 * If we find a breakpoint instruction, a kprobe has been placed
	 * at the beginning of the function. We write the constant
	 * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
	 * instruction so that the kprobes handler can execute a nop, if it
	 * reaches this breakpoint.
	 */
	if (op == BREAKPOINT_INSTRUCTION) {
		size -= 2;
		from += 2;
		to += 2;
		insn.disp = KPROBE_ON_FTRACE_NOP;
	}
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
	if (probe_kernel_write(to, from, size))
		return -EPERM;
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
	struct ftrace_insn insn;
	unsigned short op;
	void *from, *to;
	size_t size;

	ftrace_generate_call_insn(&insn, rec->ip);
	size = sizeof(insn);
	from = &insn;
	to = (void *) rec->ip;
	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
		return -EFAULT;
	/*
	 * If we find a breakpoint instruction, a kprobe has been placed
	 * at the beginning of the function. We write the constant
	 * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
	 * instruction so that the kprobes handler can execute a brasl if it
	 * reaches this breakpoint.
	 */
	if (op == BREAKPOINT_INSTRUCTION) {
		size -= 2;
		from += 2;
		to += 2;
		insn.disp = KPROBE_ON_FTRACE_CALL;
	}
	if (probe_kernel_write(to, from, size))
		return -EPERM;
	return 0;
}
@@ -111,13 +129,30 @@ int __init ftrace_dyn_arch_init(void)
	return 0;
}

static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);
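
[editor's note] For code in modules the brasl displacement cannot reach
ftrace_caller, so the page allocated above serves as a trampoline. Spelling
out the five words written by ftrace_plt_init(), decoded from the constants
in the code:

/*
 *   offset  0: 0d 10              basr %r1,0       # %r1 = address of offset 2
 *   offset  2: e3 10 10 0a 00 04  lg   %r1,10(%r1) # load the u64 at offset 12
 *   offset  8: 07 f1              br   %r1         # branch to ftrace_caller
 *   offset 10: 00 00              (padding)
 *   offset 12: FTRACE_ADDR, 64 bit
 *
 * basr makes %r1 point at the lg instruction itself (offset 2); the
 * displacement 10 then reaches offset 2 + 10 = 12, where the 64-bit address
 * of ftrace_caller was stored. Note the trampoline clobbers %r1, which is
 * why the comment block in ftrace.c calls this out for module code.
 */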

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;

@@ -137,6 +172,7 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
out:
	return parent;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction

@@ -19,7 +19,7 @@

static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

void __kprobes enabled_wait(void)
void enabled_wait(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
@@ -35,31 +35,32 @@ void __kprobes enabled_wait(void)
	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);

	trace_hardirqs_off();

	/* Account time spent with enabled wait psw loaded as idle time. */
	idle->sequence++;
	smp_wmb();
	write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(idle_time);
	smp_wmb();
	idle->sequence++;
	write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);
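
[editor's note] The hunk above replaces the open-coded sequence counter
(idle->sequence plus explicit smp_wmb() pairs) with the generic seqcount API,
which gives the same lockless-reader semantics with the barriers built in.
The pattern, sketched with the usual linux/seqlock.h primitives:

/* Writer side: the count is odd while an update is in flight. */
write_seqcount_begin(&idle->seqcount);
/* ... update the idle statistics ... */
write_seqcount_end(&idle->seqcount);

/* Reader side: retry until a consistent snapshot was read. */
unsigned int seq;
do {
	seq = read_seqcount_begin(&idle->seqcount);
	/* ... read the idle statistics ... */
} while (read_seqcount_retry(&idle->seqcount, seq));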

static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int sequence;
	unsigned int seq;

	do {
		sequence = ACCESS_ONCE(idle->sequence);
		seq = read_seqcount_begin(&idle->seqcount);
		idle_count = ACCESS_ONCE(idle->idle_count);
		if (ACCESS_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return sprintf(buf, "%llu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -69,15 +70,15 @@ static ssize_t show_idle_time(struct device *dev,
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;
	unsigned int seq;

	do {
		now = get_tod_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		seq = read_seqcount_begin(&idle->seqcount);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
	} while (read_seqcount_retry(&idle->seqcount, seq));
	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
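
[editor's note] The ">> 12" on the summed TOD delta is the usual s390
conversion: bit 51 of the TOD clock ticks once per microsecond, so one
microsecond equals 1 << 12 TOD units and dropping the low 12 bits yields
microseconds. A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long tod_delta = 4096ULL * 2500;	/* 2500 us in TOD units */
	printf("%llu us\n", tod_delta >> 12);		/* prints: 2500 us */
	return 0;
}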
@@ -87,14 +88,14 @@ cputime64_t arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit;
	unsigned int sequence;
	unsigned int seq;

	do {
		now = get_tod_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		seq = read_seqcount_begin(&idle->seqcount);
		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}

@@ -127,13 +127,10 @@ int show_interrupts(struct seq_file *p, void *v)
	for_each_online_cpu(cpu)
		seq_printf(p, "CPU%d ", cpu);
	seq_putc(p, '\n');
	goto out;
}
if (index < NR_IRQS) {
if (index >= NR_IRQS_BASE)
	goto out;
/* Adjust index to process irqclass_main_desc array entries */
index--;
seq_printf(p, "%s: ", irqclass_main_desc[index].name);
irq = irqclass_main_desc[index].irq;
for_each_online_cpu(cpu)
@@ -158,7 +155,7 @@ out:

unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	return from < THIN_INTERRUPT ? THIN_INTERRUPT : from;
	return from < NR_IRQS_BASE ? NR_IRQS_BASE : from;
}

/*

@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/dis.h>
@@ -58,12 +59,23 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
	.insn_size = MAX_INSN_SIZE,
};

static void __kprobes copy_instruction(struct kprobe *p)
static void copy_instruction(struct kprobe *p)
{
	unsigned long ip = (unsigned long) p->addr;
	s64 disp, new_disp;
	u64 addr, new_addr;

	memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
	if (ftrace_location(ip) == ip) {
		/*
		 * If kprobes patches the instruction that is morphed by
		 * ftrace make sure that kprobes always sees the branch
		 * "jg .+24" that skips the mcount block
		 */
		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
		p->ainsn.is_ftrace_insn = 1;
	} else
		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
	p->opcode = p->ainsn.insn[0];
	if (!probe_is_insn_relative_long(p->ainsn.insn))
		return;
	/*
@@ -79,25 +91,14 @@ static void __kprobes copy_instruction(struct kprobe *p)
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);

static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static inline int is_module_addr(void *addr)
{
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
#endif
	return 1;
}

static int __kprobes s390_get_insn_slot(struct kprobe *p)
static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area like the original
@@ -111,8 +112,9 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
	p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void __kprobes s390_free_insn_slot(struct kprobe *p)
static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
@@ -122,8 +124,9 @@ static void __kprobes s390_free_insn_slot(struct kprobe *p)
	free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
@@ -132,54 +135,79 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
	return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	p->opcode = *p->addr;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

struct ins_replace_args {
	kprobe_opcode_t *ptr;
	kprobe_opcode_t opcode;
};

static int __kprobes swap_instruction(void *aref)
int arch_check_ftrace_location(struct kprobe *p)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args *args = aref;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
	kcb->kprobe_status = status;
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;
struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct swap_insn_args *args = data;
	struct ftrace_insn new_insn, *insn;
	struct kprobe *p = args->p;
	size_t len;

	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	len = sizeof(new_insn.opc);
	if (!p->ainsn.is_ftrace_insn)
		goto skip_ftrace;
	len = sizeof(new_insn);
	insn = (struct ftrace_insn *) p->addr;
	if (args->arm_kprobe) {
		if (is_ftrace_nop(insn))
			new_insn.disp = KPROBE_ON_FTRACE_NOP;
		else
			new_insn.disp = KPROBE_ON_FTRACE_CALL;
	} else {
		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
		if (insn->disp == KPROBE_ON_FTRACE_NOP)
			ftrace_generate_nop_insn(&new_insn);
	}
skip_ftrace:
	kcb->kprobe_status = KPROBE_SWAP_INST;
	probe_kernel_write(p->addr, &new_insn, len);
	kcb->kprobe_status = status;
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);
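
[editor's note] swap_instruction() above distinguishes a disabled call site
from an enabled one with is_ftrace_nop(). Its likely shape, inferred from the
displacement conventions used in ftrace.c (an assumption, not a quote from
the real header):

/* Inferred sketch: a site is "nop" when its jg branches just past the
 * 24-byte mcount block, i.e. disp == MCOUNT_INSN_SIZE / 2 halfwords. */
static inline int is_ftrace_nop(struct ftrace_insn *insn)
{
	return insn->disp == MCOUNT_INSN_SIZE / 2;
}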

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	args.ptr = p->addr;
	args.opcode = BREAKPOINT_INSTRUCTION;
	stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void __kprobes arch_disarm_kprobe(struct kprobe *p)
void arch_disarm_kprobe(struct kprobe *p)
{
	struct ins_replace_args args;
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	args.ptr = p->addr;
	args.opcode = p->opcode;
	stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void __kprobes arch_remove_kprobe(struct kprobe *p)
void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
					struct pt_regs *regs,
					unsigned long ip)
static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

@@ -199,10 +227,11 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
					 struct pt_regs *regs,
					 unsigned long ip)
static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
@@ -210,41 +239,43 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
					   struct kprobe *p)
static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
@@ -264,8 +295,9 @@ static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
	BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int __kprobes kprobe_handler(struct pt_regs *regs)
static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;
@@ -339,6 +371,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
@@ -355,8 +388,7 @@ static void __used kretprobe_trampoline_holder(void)
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
@@ -444,6 +476,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping. p->addr is the address of the
@@ -453,12 +486,30 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	/* Check if the kprobes location is an enabled ftrace caller */
	if (p->ainsn.is_ftrace_insn) {
		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
		struct ftrace_insn call_insn;

		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
		/*
		 * A kprobe on an enabled ftrace call site actually single
		 * stepped an unconditional branch (ftrace nop equivalent).
		 * Now we need to fixup things and pretend that a brasl r0,...
		 * was executed instead.
		 */
		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
		}
	}

	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

@@ -476,8 +527,9 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
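
[editor's note] The ftrace fixup in resume_execution() is easiest to check by
expanding the displacements. A worked derivation, following the definitions in
this diff (copy is the out-of-line buffer p->ainsn.insn):

/*
 * Single-stepping the copied "jg .+24" nop leaves the PSW at
 * copy + MCOUNT_INSN_SIZE. Since ftrace_generate_call_insn() sets
 * call_insn.disp * 2 == FTRACE_ADDR - p->addr, the two adjustments give:
 *
 *   ip  = copy + MCOUNT_INSN_SIZE                 (after the single-step)
 *   ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE   = copy + FTRACE_ADDR - p->addr
 *   ip += p->addr - copy                          (FIXUP_PSW_NORMAL)
 *       = FTRACE_ADDR
 *
 * and %r0 = p->addr + sizeof(struct ftrace_insn) = p->addr + 6, exactly
 * what "brasl %r0,ftrace_caller" at p->addr would have produced.
 */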

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
@@ -504,8 +556,9 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
@@ -567,8 +620,9 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

@@ -579,12 +633,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
	local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
@@ -616,8 +671,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -635,13 +691,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void __kprobes jprobe_return(void)
void jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
NOKPROBE_SYMBOL(jprobe_return);

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

@@ -655,6 +713,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
@@ -666,7 +725,8 @@ int __init arch_init_kprobes(void)
	return register_kprobe(&trampoline);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);

@@ -27,6 +27,7 @@ ENTRY(ftrace_caller)
	.globl	ftrace_regs_caller
	.set	ftrace_regs_caller,ftrace_caller
	lgr	%r1,%r15
	aghi	%r0,MCOUNT_RETURN_FIXUP
	aghi	%r15,-STACK_FRAME_SIZE
	stg	%r1,__SF_BACKCHAIN(%r15)
	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)

@@ -1383,7 +1383,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
	cpuhw->lsctl.ed = 1;

	/* Set in_use flag and store event */
	event->hw.idx = 0;	/* only one sampling event per CPU supported */
	cpuhw->event = event;
	cpuhw->flags |= PMU_F_IN_USE;

@@ -61,7 +61,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
	return sf->gprs[8];
}

extern void __kprobes kernel_thread_starter(void);
extern void kernel_thread_starter(void);

/*
 * Free current thread data structures etc..
@@ -153,6 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
	save_fp_ctl(&p->thread.fp_regs.fpc);
	save_fp_regs(p->thread.fp_regs.fprs);
	p->thread.fp_regs.pad = 0;
	p->thread.vxrs = NULL;
	/* Set a new TLS ? */
	if (clone_flags & CLONE_SETTLS) {
		unsigned long tls = frame->childregs.gprs[6];

@@ -248,14 +248,27 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
	 */
	tmp = 0;

} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
	/*
	 * floating point regs. are stored in the thread structure
} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
	/*
	 * floating point control reg. is in the thread structure
	 */
	offset = addr - (addr_t) &dummy->regs.fp_regs;
	tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
	if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
		tmp <<= BITS_PER_LONG - 32;
	tmp = child->thread.fp_regs.fpc;
	tmp <<= BITS_PER_LONG - 32;

} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
	/*
	 * floating point regs. are either in child->thread.fp_regs
	 * or the child->thread.vxrs array
	 */
	offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
#ifdef CONFIG_64BIT
	if (child->thread.vxrs)
		tmp = *(addr_t *)
		      ((addr_t) child->thread.vxrs + 2*offset);
	else
#endif
		tmp = *(addr_t *)
		      ((addr_t) &child->thread.fp_regs.fprs + offset);

} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
	/*
@@ -383,16 +396,29 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
	 */
	return 0;

} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
	/*
	 * floating point control reg. is in the thread structure
	 */
	if ((unsigned int) data != 0 ||
	    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
		return -EINVAL;
	child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32);

} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
	/*
	 * floating point regs. are stored in the thread structure
	 * floating point regs. are either in child->thread.fp_regs
	 * or the child->thread.vxrs array
	 */
	if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
	offset = addr - (addr_t) &dummy->regs.fp_regs;
	*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
	offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
#ifdef CONFIG_64BIT
	if (child->thread.vxrs)
		*(addr_t *)((addr_t)
		child->thread.vxrs + 2*offset) = data;
	else
#endif
		*(addr_t *)((addr_t)
		&child->thread.fp_regs.fprs + offset) = data;

} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
	/*
@@ -611,12 +637,26 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
	 */
	tmp = 0;

} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
	/*
	 * floating point control reg. is in the thread structure
	 */
	tmp = child->thread.fp_regs.fpc;

} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
	/*
	 * floating point regs. are stored in the thread structure
	 * floating point regs. are either in child->thread.fp_regs
	 * or the child->thread.vxrs array
	 */
	offset = addr - (addr_t) &dummy32->regs.fp_regs;
	tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
	offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
#ifdef CONFIG_64BIT
	if (child->thread.vxrs)
		tmp = *(__u32 *)
		      ((addr_t) child->thread.vxrs + 2*offset);
	else
#endif
		tmp = *(__u32 *)
		      ((addr_t) &child->thread.fp_regs.fprs + offset);

} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
	/*
@@ -722,15 +762,28 @@ static int __poke_user_compat(struct task_struct *child,
	 */
	return 0;

} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
	/*
	 * floating point control reg. is in the thread structure
	 */
	if (test_fp_ctl(tmp))
		return -EINVAL;
	child->thread.fp_regs.fpc = data;

} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
	/*
	 * floating point regs. are stored in the thread structure
	 * floating point regs. are either in child->thread.fp_regs
	 * or the child->thread.vxrs array
	 */
	if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
	    test_fp_ctl(tmp))
		return -EINVAL;
	offset = addr - (addr_t) &dummy32->regs.fp_regs;
	*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
	offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
#ifdef CONFIG_64BIT
	if (child->thread.vxrs)
		*(__u32 *)((addr_t)
		child->thread.vxrs + 2*offset) = tmp;
	else
#endif
		*(__u32 *)((addr_t)
		&child->thread.fp_regs.fprs + offset) = tmp;

} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
	/*
@@ -1038,12 +1091,6 @@ static int s390_tdb_set(struct task_struct *target,
	return 0;
}

static int s390_vxrs_active(struct task_struct *target,
			    const struct user_regset *regset)
{
	return !!target->thread.vxrs;
}

static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
@@ -1052,6 +1099,8 @@ static int s390_vxrs_low_get(struct task_struct *target,
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target->thread.vxrs) {
		if (target == current)
			save_vx_regs(target->thread.vxrs);
@@ -1070,6 +1119,8 @@ static int s390_vxrs_low_set(struct task_struct *target,
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (!target->thread.vxrs) {
		rc = alloc_vector_registers(target);
		if (rc)
@@ -1095,6 +1146,8 @@ static int s390_vxrs_high_get(struct task_struct *target,
{
	__vector128 vxrs[__NUM_VXRS_HIGH];

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target->thread.vxrs) {
		if (target == current)
			save_vx_regs(target->thread.vxrs);
@@ -1112,6 +1165,8 @@ static int s390_vxrs_high_set(struct task_struct *target,
{
	int rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (!target->thread.vxrs) {
		rc = alloc_vector_registers(target);
		if (rc)
@@ -1196,7 +1251,6 @@ static const struct user_regset s390_regsets[] = {
	.n = __NUM_VXRS_LOW,
	.size = sizeof(__u64),
	.align = sizeof(__u64),
	.active = s390_vxrs_active,
	.get = s390_vxrs_low_get,
	.set = s390_vxrs_low_set,
},
@@ -1205,7 +1259,6 @@
	.n = __NUM_VXRS_HIGH,
	.size = sizeof(__vector128),
	.align = sizeof(__vector128),
	.active = s390_vxrs_active,
	.get = s390_vxrs_high_get,
	.set = s390_vxrs_high_set,
},
@@ -1419,7 +1472,6 @@ static const struct user_regset s390_compat_regsets[] = {
	.n = __NUM_VXRS_LOW,
	.size = sizeof(__u64),
	.align = sizeof(__u64),
	.active = s390_vxrs_active,
	.get = s390_vxrs_low_get,
	.set = s390_vxrs_low_set,
},
@@ -1428,7 +1480,6 @@
	.n = __NUM_VXRS_HIGH,
	.size = sizeof(__vector128),
	.align = sizeof(__vector128),
	.active = s390_vxrs_active,
	.get = s390_vxrs_high_get,
	.set = s390_vxrs_high_set,
},
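
[editor's note] The recurring "2*offset" in the ptrace hunks above encodes the
register overlap on machines with the vector facility: each vector register is
16 bytes wide and floating point register i forms the leftmost 8 bytes of
vector register i, so an 8-byte fpr slot at byte offset off in fp_regs.fprs
sits at byte offset 2*off in the vxrs array. A standalone illustration of the
index arithmetic:

#include <stdio.h>

int main(void)
{
	/* fpr i lives at byte 8*i of fprs[] and overlays the first 8 bytes
	 * of the 16-byte vector register at byte 16*i of vxrs[], hence
	 * vxrs offset == 2 * fprs offset. */
	for (int i = 0; i < 16; i++) {
		unsigned long off = 8UL * i;
		printf("fpr%-2d  fprs+%-3lu  ->  vxrs+%lu\n", i, off, 2 * off);
	}
	return 0;
}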

@@ -41,7 +41,6 @@
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
@@ -356,7 +355,6 @@ static void __init setup_lowcore(void)
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;
	lc->ftrace_func = S390_lowcore.ftrace_func;

	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
	restart_stack += ASYNC_SIZE;

@@ -371,7 +371,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
	restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
} else {
	/* Signal frames without vector registers are short ! */
	__u16 __user *svc = (void *) frame + frame_size - 2;
	__u16 __user *svc = (void __user *) frame + frame_size - 2;
	if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
		return -EFAULT;
	restorer = (unsigned long) svc | PSW_ADDR_AMODE;

@@ -236,7 +236,6 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);

@@ -360,3 +360,5 @@ SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
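
[editor's note] The two new entries hook up the PCI MMIO emulation syscalls
mentioned in the pull request text. A hedged user-space sketch -- the numbers
352/353 are read off the table above (memfd_create is 350), and the
(mmio address, user buffer, length) argument order follows the syscall
wrappers added by this series:

#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_s390_pci_mmio_write 352	/* inferred from the table above */
#define __NR_s390_pci_mmio_read  353

static long pci_mmio_write(unsigned long mmio_addr, const void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_write, mmio_addr, buf, len);
}

static long pci_mmio_read(unsigned long mmio_addr, void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
}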

@@ -61,10 +61,11 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace __kprobes sched_clock(void)
unsigned long long notrace sched_clock(void)
{
	return tod_to_ns(get_tod_clock_monotonic());
}
NOKPROBE_SYMBOL(sched_clock);

/*
 * Monotonic_clock - returns # of nanoseconds passed since time_init()

@@ -49,7 +49,8 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
	return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%X ", regs->int_code);
	printk("User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
@@ -87,16 +88,16 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
	}
}

static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code,
			      char *str)
static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;
	do_report_trap(regs, si_signo, si_code, str);
}
NOKPROBE_SYMBOL(do_trap);

void __kprobes do_per_trap(struct pt_regs *regs)
void do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;

@@ -111,6 +112,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
	(void __force __user *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}
NOKPROBE_SYMBOL(do_per_trap);

void default_trap_handler(struct pt_regs *regs)
{
@@ -151,8 +153,6 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
@@ -179,7 +179,13 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
	do_trap(regs, SIGFPE, si_code, "floating point exception");
}

void __kprobes illegal_op(struct pt_regs *regs)
void translation_exception(struct pt_regs *regs)
{
	/* May never happen. */
	die(regs, "Translation exception");
}

void illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
@@ -252,7 +258,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}

NOKPROBE_SYMBOL(illegal_op);

#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
@@ -469,7 +475,7 @@ void space_switch_exception(struct pt_regs *regs)
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

void __kprobes kernel_stack_overflow(struct pt_regs * regs)
void kernel_stack_overflow(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
@@ -477,6 +483,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
NOKPROBE_SYMBOL(kernel_stack_overflow);

void __init trap_init(void)
{