Merge branches 'sh/xstate', 'sh/hw-breakpoints' and 'sh/stable-updates'
@@ -2,7 +2,7 @@
  * SuperH process tracing
  *
  * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
- * Copyright (C) 2002 - 2008  Paul Mundt
+ * Copyright (C) 2002 - 2009  Paul Mundt
  *
  * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
  *
@@ -26,6 +26,7 @@
 #include <linux/tracehook.h>
 #include <linux/elf.h>
 #include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -63,33 +64,64 @@ static inline int put_stack_long(struct task_struct *task, int offset,
 	return 0;
 }

+void ptrace_triggered(struct perf_event *bp, int nmi,
+		      struct perf_sample_data *data, struct pt_regs *regs)
+{
+	struct perf_event_attr attr;
+
+	/*
+	 * Disable the breakpoint request here since ptrace has defined a
+	 * one-shot behaviour for breakpoint exceptions.
+	 */
+	attr = bp->attr;
+	attr.disabled = true;
+	modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+	struct thread_struct *thread = &tsk->thread;
+	struct perf_event *bp;
+	struct perf_event_attr attr;
+
+	bp = thread->ptrace_bps[0];
+	if (!bp) {
+		hw_breakpoint_init(&attr);
+
+		attr.bp_addr = addr;
+		attr.bp_len = HW_BREAKPOINT_LEN_2;
+		attr.bp_type = HW_BREAKPOINT_R;
+
+		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+		if (IS_ERR(bp))
+			return PTR_ERR(bp);
+
+		thread->ptrace_bps[0] = bp;
+	} else {
+		int err;
+
+		attr = bp->attr;
+		attr.bp_addr = addr;
+		err = modify_user_hw_breakpoint(bp, &attr);
+		if (unlikely(err))
+			return err;
+	}
+
+	return 0;
+}
+
 void user_enable_single_step(struct task_struct *child)
 {
-	/* Next scheduling will set up UBC */
-	if (child->thread.ubc_pc == 0)
-		ubc_usercnt += 1;
-
-	child->thread.ubc_pc = get_stack_long(child,
-				offsetof(struct pt_regs, pc));
+	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+	set_single_step(child, pc);
 }

 void user_disable_single_step(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
-	/*
-	 * Ensure the UBC is not programmed at the next context switch.
-	 *
-	 * Normally this is not needed but there are sequences such as
-	 * singlestep, signal delivery, and continue that leave the
-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-	 */
-	if (child->thread.ubc_pc != 0) {
-		ubc_usercnt -= 1;
-		child->thread.ubc_pc = 0;
-	}
 }

 /*
@@ -163,10 +195,10 @@ int fpregs_get(struct task_struct *target,

 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.fpu.hard, 0, -1);
+					   &target->thread.xstate->hardfpu, 0, -1);

 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu.soft, 0, -1);
+				   &target->thread.xstate->softfpu, 0, -1);
 }

 static int fpregs_set(struct task_struct *target,
@@ -184,10 +216,10 @@ static int fpregs_set(struct task_struct *target,

 	if ((boot_cpu_data.flags & CPU_HAS_FPU))
 		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu.hard, 0, -1);
+					  &target->thread.xstate->hardfpu, 0, -1);

 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu.soft, 0, -1);
+				  &target->thread.xstate->softfpu, 0, -1);
 }

 static int fpregs_active(struct task_struct *target,
@@ -333,7 +365,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 				else
 					tmp = 0;
 			} else
-				tmp = ((long *)&child->thread.fpu)
+				tmp = ((long *)child->thread.xstate)
 					[(addr - (long)&dummy->fpu) >> 2];
 		} else if (addr == (long) &dummy->u_fpvalid)
 			tmp = !!tsk_used_math(child);
@@ -362,7 +394,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
 			set_stopped_child_used_math(child);
-			((long *)&child->thread.fpu)
+			((long *)child->thread.xstate)
 				[(addr - (long)&dummy->fpu) >> 2] = data;
 			ret = 0;
 		} else if (addr == (long) &dummy->u_fpvalid) {
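
For illustration, a minimal userspace tracer sketch that exercises the single-step path above: each PTRACE_SINGLESTEP resume ends up in user_enable_single_step(), which now programs the UBC through set_single_step() and the generic hw_breakpoint layer instead of the old ubc_pc/ubc_usercnt bookkeeping. This is not part of the commit; it uses only standard ptrace(2)/waitpid(2) calls, and the traced program /bin/true and the step count are arbitrary choices.

/*
 * Hypothetical example, not from the kernel tree: single-step a traced
 * child a few times. On SH with the change above, each step is backed by
 * register_user_hw_breakpoint()/modify_user_hw_breakpoint(), and the
 * breakpoint is disabled again in ptrace_triggered() (one-shot behaviour).
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0) {
		/* Child: request tracing, then exec an arbitrary program. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}

	waitpid(child, &status, 0);	/* child stops with SIGTRAP after exec */

	/* Single-step a handful of instructions, then let the child run. */
	for (int i = 0; i < 5 && WIFSTOPPED(status); i++) {
		if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) < 0) {
			perror("PTRACE_SINGLESTEP");
			break;
		}
		waitpid(child, &status, 0);
	}

	if (WIFSTOPPED(status)) {
		ptrace(PTRACE_CONT, child, NULL, NULL);
		waitpid(child, &status, 0);
	}

	return 0;
}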