Merge branches 'x86/signal' and 'x86/irq' into perfcounters/core
Merge these pending x86 tree changes into the perfcounters tree to avoid conflicts.
@@ -242,21 +242,13 @@ config X86_FIND_SMP_CONFIG
 	def_bool y
 	depends on X86_MPPARSE || X86_VOYAGER
 
-if ACPI
 config X86_MPPARSE
-	def_bool y
-	bool "Enable MPS table"
+	bool "Enable MPS table" if ACPI
+	default y
 	depends on X86_LOCAL_APIC
 	help
 	  For old smp systems that do not have proper acpi support. Newer systems
 	  (esp with 64bit cpus) with acpi support, MADT and DSDT will override it
-endif
-
-if !ACPI
-config X86_MPPARSE
-	def_bool y
-	depends on X86_LOCAL_APIC
-endif
 
 choice
 	prompt "Subarchitecture Type"
@@ -197,23 +197,28 @@ struct rt_sigframe
 	/* fp state follows here */
 };
 
 #define COPY(x)			{		\
-	unsigned int reg;			\
-	err |= __get_user(reg, &sc->x);		\
-	regs->x = reg;				\
+	err |= __get_user(regs->x, &sc->x);	\
 }
 
-#define RELOAD_SEG(seg,mask)			\
-	{ unsigned int cur;			\
-	  unsigned short pre;			\
-	  err |= __get_user(pre, &sc->seg);	\
-	  savesegment(seg, cur);		\
-	  pre |= mask;				\
-	  if (pre != cur) loadsegment(seg, pre); }
+#define COPY_SEG_CPL3(seg)	{		\
+		unsigned short tmp;		\
+		err |= __get_user(tmp, &sc->seg); \
+		regs->seg = tmp | 3;		\
+}
+
+#define RELOAD_SEG(seg)		{		\
+		unsigned int cur, pre;		\
+		err |= __get_user(pre, &sc->seg); \
+		savesegment(seg, cur);		\
+		pre |= 3;			\
+		if (pre != cur)			\
+			loadsegment(seg, pre);	\
+}
 
 static int ia32_restore_sigcontext(struct pt_regs *regs,
 				   struct sigcontext_ia32 __user *sc,
-				   unsigned int *peax)
+				   unsigned int *pax)
 {
 	unsigned int tmpflags, gs, oldgs, err = 0;
 	void __user *buf;
@@ -240,18 +245,16 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 	if (gs != oldgs)
 		load_gs_index(gs);
 
-	RELOAD_SEG(fs, 3);
-	RELOAD_SEG(ds, 3);
-	RELOAD_SEG(es, 3);
+	RELOAD_SEG(fs);
+	RELOAD_SEG(ds);
+	RELOAD_SEG(es);
 
 	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
 	COPY(dx); COPY(cx); COPY(ip);
 	/* Don't touch extended registers */
 
-	err |= __get_user(regs->cs, &sc->cs);
-	regs->cs |= 3;
-	err |= __get_user(regs->ss, &sc->ss);
-	regs->ss |= 3;
+	COPY_SEG_CPL3(cs);
+	COPY_SEG_CPL3(ss);
 
 	err |= __get_user(tmpflags, &sc->flags);
 	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -262,9 +265,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 		buf = compat_ptr(tmp);
 	err |= restore_i387_xstate_ia32(buf);
 
-	err |= __get_user(tmp, &sc->ax);
-	*peax = tmp;
-
+	err |= __get_user(*pax, &sc->ax);
 	return err;
 }
 
@@ -359,20 +360,15 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
 	err |= __put_user(regs->dx, &sc->dx);
 	err |= __put_user(regs->cx, &sc->cx);
 	err |= __put_user(regs->ax, &sc->ax);
-	err |= __put_user(regs->cs, &sc->cs);
-	err |= __put_user(regs->ss, &sc->ss);
 	err |= __put_user(current->thread.trap_no, &sc->trapno);
 	err |= __put_user(current->thread.error_code, &sc->err);
 	err |= __put_user(regs->ip, &sc->ip);
+	err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
 	err |= __put_user(regs->flags, &sc->flags);
 	err |= __put_user(regs->sp, &sc->sp_at_signal);
+	err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
 
-	tmp = save_i387_xstate_ia32(fpstate);
-	if (tmp < 0)
-		err = -EFAULT;
-	else
-		err |= __put_user(ptr_to_compat(tmp ? fpstate : NULL),
-					&sc->fpstate);
+	err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
 
 	/* non-iBCS2 extensions.. */
 	err |= __put_user(mask, &sc->oldmask);
@@ -408,6 +404,8 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
 	if (used_math()) {
 		sp = sp - sig_xstate_ia32_size;
 		*fpstate = (struct _fpstate_ia32 *) sp;
+		if (save_i387_xstate_ia32(*fpstate) < 0)
+			return (void __user *) -1L;
 	}
 
 	sp -= frame_size;
@@ -430,12 +428,10 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
 		u16 poplmovl;
 		u32 val;
 		u16 int80;
-		u16 pad;
 	} __attribute__((packed)) code = {
 		0xb858,		 /* popl %eax ; movl $...,%eax */
 		__NR_ia32_sigreturn,
 		0x80cd,		/* int $0x80 */
-		0,
 	};
 
 	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
@@ -511,8 +507,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		u8 movl;
 		u32 val;
 		u16 int80;
-		u16 pad;
-		u8 pad2;
+		u8 pad;
 	} __attribute__((packed)) code = {
 		0xb8,
 		__NR_ia32_rt_sigreturn,
@@ -572,11 +567,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->dx = (unsigned long) &frame->info;
 	regs->cx = (unsigned long) &frame->uc;
 
-	/* Make -mregparm=3 work */
-	regs->ax = sig;
-	regs->dx = (unsigned long) &frame->info;
-	regs->cx = (unsigned long) &frame->uc;
-
 	loadsegment(ds, __USER32_DS);
 	loadsegment(es, __USER32_DS);
 
@@ -168,7 +168,15 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  */
 static inline void change_bit(int nr, volatile unsigned long *addr)
 {
-	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
+	if (IS_IMMEDIATE(nr)) {
+		asm volatile(LOCK_PREFIX "xorb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((u8)CONST_MASK(nr)));
+	} else {
+		asm volatile(LOCK_PREFIX "btc %1,%0"
+			: BITOP_ADDR(addr)
+			: "Ir" (nr));
+	}
 }
 
 /**
@@ -4,26 +4,33 @@
 #include <asm/types.h>
 #include <linux/compiler.h>
 
-#ifdef __GNUC__
+#define __LITTLE_ENDIAN
 
-#ifdef __i386__
-
-static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
 {
-#ifdef CONFIG_X86_BSWAP
-	asm("bswap %0" : "=r" (x) : "0" (x));
-#else
+#ifdef __i386__
+# ifdef CONFIG_X86_BSWAP
+	asm("bswap %0" : "=r" (val) : "0" (val));
+# else
 	asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
 	    "rorl $16,%0\n\t" /* swap words */
 	    "xchgb %b0,%h0" /* swap higher bytes */
-	    : "=q" (x)
-	    : "0" (x));
-#endif
-	return x;
-}
+	    : "=q" (val)
+	    : "0" (val));
+# endif
+#else /* __i386__ */
+	asm("bswapl %0"
+	    : "=r" (val)
+	    : "0" (val));
+#endif
+	return val;
+}
+#define __arch_swab32 __arch_swab32
 
-static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
 {
+#ifdef __i386__
 	union {
 		struct {
 			__u32 a;
@@ -32,50 +39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
 		__u64 u;
 	} v;
 	v.u = val;
-#ifdef CONFIG_X86_BSWAP
+# ifdef CONFIG_X86_BSWAP
 	asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
-#else
-	v.s.a = ___arch__swab32(v.s.a);
-	v.s.b = ___arch__swab32(v.s.b);
+# else
+	v.s.a = __arch_swab32(v.s.a);
+	v.s.b = __arch_swab32(v.s.b);
 	asm("xchgl %0,%1"
 	    : "=r" (v.s.a), "=r" (v.s.b)
 	    : "0" (v.s.a), "1" (v.s.b));
-#endif
+# endif
 	return v.u;
-}
-
 #else /* __i386__ */
-
-static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
 	asm("bswapq %0"
-	    : "=r" (x)
-	    : "0" (x));
-	return x;
-}
-
-static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
-	asm("bswapl %0"
-	    : "=r" (x)
-	    : "0" (x));
-	return x;
-}
-
+	    : "=r" (val)
+	    : "0" (val));
+	return val;
 #endif
+}
+#define __arch_swab64 __arch_swab64
 
-/* Do not define swab16. Gcc is smart enough to recognize "C" version and
-   convert it into rotation or exhange. */
-
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-#define __BYTEORDER_HAS_U64__
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/little_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _ASM_X86_BYTEORDER_H */
@@ -6,56 +6,91 @@
 #endif
 
 /*
-   Macros for dwarf2 CFI unwind table entries.
-   See "as.info" for details on these pseudo ops. Unfortunately
-   they are only supported in very new binutils, so define them
-   away for older version.
+ * Macros for dwarf2 CFI unwind table entries.
+ * See "as.info" for details on these pseudo ops. Unfortunately
+ * they are only supported in very new binutils, so define them
+ * away for older version.
  */
 
 #ifdef CONFIG_AS_CFI
 
 #define CFI_STARTPROC		.cfi_startproc
 #define CFI_ENDPROC		.cfi_endproc
 #define CFI_DEF_CFA		.cfi_def_cfa
 #define CFI_DEF_CFA_REGISTER	.cfi_def_cfa_register
 #define CFI_DEF_CFA_OFFSET	.cfi_def_cfa_offset
 #define CFI_ADJUST_CFA_OFFSET	.cfi_adjust_cfa_offset
 #define CFI_OFFSET		.cfi_offset
 #define CFI_REL_OFFSET		.cfi_rel_offset
 #define CFI_REGISTER		.cfi_register
 #define CFI_RESTORE		.cfi_restore
 #define CFI_REMEMBER_STATE	.cfi_remember_state
 #define CFI_RESTORE_STATE	.cfi_restore_state
 #define CFI_UNDEFINED		.cfi_undefined
 
 #ifdef CONFIG_AS_CFI_SIGNAL_FRAME
 #define CFI_SIGNAL_FRAME	.cfi_signal_frame
 #else
 #define CFI_SIGNAL_FRAME
 #endif
 
 #else
 
-/* Due to the structure of pre-exisiting code, don't use assembler line
-   comment character # to ignore the arguments. Instead, use a dummy macro. */
+/*
+ * Due to the structure of pre-exisiting code, don't use assembler line
+ * comment character # to ignore the arguments. Instead, use a dummy macro.
+ */
 .macro cfi_ignore a=0, b=0, c=0, d=0
 .endm
 
 #define CFI_STARTPROC		cfi_ignore
 #define CFI_ENDPROC		cfi_ignore
 #define CFI_DEF_CFA		cfi_ignore
 #define CFI_DEF_CFA_REGISTER	cfi_ignore
 #define CFI_DEF_CFA_OFFSET	cfi_ignore
 #define CFI_ADJUST_CFA_OFFSET	cfi_ignore
 #define CFI_OFFSET		cfi_ignore
 #define CFI_REL_OFFSET		cfi_ignore
 #define CFI_REGISTER		cfi_ignore
 #define CFI_RESTORE		cfi_ignore
 #define CFI_REMEMBER_STATE	cfi_ignore
 #define CFI_RESTORE_STATE	cfi_ignore
 #define CFI_UNDEFINED		cfi_ignore
 #define CFI_SIGNAL_FRAME	cfi_ignore
 
 #endif
 
+/*
+ * An attempt to make CFI annotations more or less
+ * correct and shorter. It is implied that you know
+ * what you're doing if you use them.
+ */
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_X86_64
+	.macro pushq_cfi reg
+	pushq \reg
+	CFI_ADJUST_CFA_OFFSET 8
+	.endm
+
+	.macro popq_cfi reg
+	popq \reg
+	CFI_ADJUST_CFA_OFFSET -8
+	.endm
+
+	.macro movq_cfi reg offset=0
+	movq %\reg, \offset(%rsp)
+	CFI_REL_OFFSET \reg, \offset
+	.endm
+
+	.macro movq_cfi_restore offset reg
+	movq \offset(%rsp), %\reg
+	CFI_RESTORE \reg
+	.endm
+#else /*!CONFIG_X86_64*/
+
+	/* 32bit defenitions are missed yet */
+
+#endif /*!CONFIG_X86_64*/
+#endif /*__ASSEMBLY__*/
+
 #endif /* _ASM_X86_DWARF2_H */
@@ -109,9 +109,7 @@ extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
 #endif
 #endif
 
-#ifdef CONFIG_X86_32
-extern void (*const interrupt[NR_VECTORS])(void);
-#endif
+extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
 
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
@@ -31,10 +31,6 @@ static inline int irq_canonicalize(int irq)
 # endif
 #endif
 
-#ifdef CONFIG_IRQBALANCE
-extern int irqbalance_disable(char *str);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern void fixup_irqs(cpumask_t map);
@@ -9,6 +9,8 @@
 
 #include <asm/percpu.h>
 
+#define ARCH_HAS_OWN_IRQ_REGS
+
 DECLARE_PER_CPU(struct pt_regs *, irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
@@ -57,5 +57,65 @@
 #define __ALIGN_STR ".align 16,0x90"
 #endif
 
+/*
+ * to check ENTRY_X86/END_X86 and
+ * KPROBE_ENTRY_X86/KPROBE_END_X86
+ * unbalanced-missed-mixed appearance
+ */
+#define __set_entry_x86		.set ENTRY_X86_IN, 0
+#define __unset_entry_x86	.set ENTRY_X86_IN, 1
+#define __set_kprobe_x86	.set KPROBE_X86_IN, 0
+#define __unset_kprobe_x86	.set KPROBE_X86_IN, 1
+
+#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
+
+#define __check_entry_x86	\
+	.ifdef ENTRY_X86_IN;	\
+	.ifeq ENTRY_X86_IN;	\
+	__macro_err_x86;	\
+	.abort;			\
+	.endif;			\
+	.endif
+
+#define __check_kprobe_x86	\
+	.ifdef KPROBE_X86_IN;	\
+	.ifeq KPROBE_X86_IN;	\
+	__macro_err_x86;	\
+	.abort;			\
+	.endif;			\
+	.endif
+
+#define __check_entry_kprobe_x86	\
+	__check_entry_x86;		\
+	__check_kprobe_x86
+
+#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86
+
+#define ENTRY_X86(name)			\
+	__check_entry_kprobe_x86;	\
+	__set_entry_x86;		\
+	.globl name;			\
+	__ALIGN;			\
+	name:
+
+#define END_X86(name)			\
+	__unset_entry_x86;		\
+	__check_entry_kprobe_x86;	\
+	.size name, .-name
+
+#define KPROBE_ENTRY_X86(name)		\
+	__check_entry_kprobe_x86;	\
+	__set_kprobe_x86;		\
+	.pushsection .kprobes.text, "ax"; \
+	.globl name;			\
+	__ALIGN;			\
+	name:
+
+#define KPROBE_END_X86(name)		\
+	__unset_kprobe_x86;		\
+	__check_entry_kprobe_x86;	\
+	.size name, .-name;		\
+	.popsection
+
 #endif /* _ASM_X86_LINKAGE_H */
 
@@ -33,7 +33,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
 			     struct old_sigaction __user *);
 asmlinkage int sys_sigaltstack(unsigned long);
 asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(unsigned long);
+asmlinkage int sys_rt_sigreturn(struct pt_regs);
 
 /* kernel/ioport.c */
 asmlinkage long sys_iopl(unsigned long);
@@ -34,8 +34,6 @@ static inline cycles_t get_cycles(void)
 
 static __always_inline cycles_t vget_cycles(void)
 {
-	cycles_t cycles;
-
 	/*
 	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
 	 * access boot_cpu_data (which is not VDSO-safe):
@@ -44,11 +42,7 @@ static __always_inline cycles_t vget_cycles(void)
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	rdtsc_barrier();
-	cycles = (cycles_t)__native_read_tsc();
-	rdtsc_barrier();
-
-	return cycles;
+	return (cycles_t)__native_read_tsc();
 }
 
 extern void tsc_init(void);
@@ -12,6 +12,7 @@ CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
 endif
 
 #
@@ -23,7 +24,7 @@ CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o := $(nostackp)
 CFLAGS_tsc.o := $(nostackp)
 
-obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o
+obj-y := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y += time_$(BITS).o ioport.o ldt.o
 obj-y += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
@@ -619,28 +619,37 @@ END(syscall_badsys)
 27:;
 
 /*
- * Build the entry stubs and pointer table with
- * some assembler magic.
+ * Build the entry stubs and pointer table with some assembler magic.
+ * We pack 7 stubs into a single 32-byte chunk, which will fit in a
+ * single cache line on all modern x86 implementations.
  */
-.section .rodata,"a"
+.section .init.rodata,"a"
 ENTRY(interrupt)
 .text
+	.p2align 5
+	.p2align CONFIG_X86_L1_CACHE_SHIFT
 ENTRY(irq_entries_start)
 	RING0_INT_FRAME
-vector=0
-.rept NR_VECTORS
-	ALIGN
-.if vector
+vector=FIRST_EXTERNAL_VECTOR
+.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+	.balign 32
+  .rept 7
+    .if vector < NR_VECTORS
+      .if vector <> FIRST_EXTERNAL_VECTOR
 	CFI_ADJUST_CFA_OFFSET -4
-.endif
-1:	pushl $~(vector)
+      .endif
+1:	pushl $(~vector+0x80)	/* Note: always in signed byte range */
 	CFI_ADJUST_CFA_OFFSET 4
-	jmp common_interrupt
-.previous
+      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
+	jmp 2f
+      .endif
+      .previous
 	.long 1b
-.text
+      .text
 vector=vector+1
+    .endif
+  .endr
+2:	jmp common_interrupt
 .endr
 END(irq_entries_start)
 
@@ -652,8 +661,9 @@ END(interrupt)
  * the CPU automatically disables interrupts when executing an IRQ vector,
  * so IRQ-flags tracing has to follow that:
  */
-	ALIGN
+	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
 	SAVE_ALL
 	TRACE_IRQS_OFF
 	movl %esp,%eax
@@ -678,65 +688,6 @@ ENDPROC(name)
 /* The include is where all of the SMP etc. interrupts come from */
 #include "entry_arch.h"
 
-KPROBE_ENTRY(page_fault)
-	RING0_EC_FRAME
-	pushl $do_page_fault
-	CFI_ADJUST_CFA_OFFSET 4
-	ALIGN
-error_code:
-	/* the function address is in %fs's slot on the stack */
-	pushl %es
-	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET es, 0*/
-	pushl %ds
-	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET ds, 0*/
-	pushl %eax
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET eax, 0
-	pushl %ebp
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ebp, 0
-	pushl %edi
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edi, 0
-	pushl %esi
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET esi, 0
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx, 0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx, 0
-	pushl %ebx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ebx, 0
-	cld
-	pushl %fs
-	CFI_ADJUST_CFA_OFFSET 4
-	/*CFI_REL_OFFSET fs, 0*/
-	movl $(__KERNEL_PERCPU), %ecx
-	movl %ecx, %fs
-	UNWIND_ESPFIX_STACK
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	/*CFI_REGISTER es, ecx*/
-	movl PT_FS(%esp), %edi		# get the function address
-	movl PT_ORIG_EAX(%esp), %edx	# get the error code
-	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
-	mov %ecx, PT_FS(%esp)
-	/*CFI_REL_OFFSET fs, ES*/
-	movl $(__USER_DS), %ecx
-	movl %ecx, %ds
-	movl %ecx, %es
-	TRACE_IRQS_OFF
-	movl %esp,%eax			# pt_regs pointer
-	call *%edi
-	jmp ret_from_exception
-	CFI_ENDPROC
-KPROBE_END(page_fault)
-
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
 	pushl $0
@@ -767,140 +718,6 @@ ENTRY(device_not_available)
|
|||||||
CFI_ENDPROC
|
CFI_ENDPROC
|
||||||
END(device_not_available)
|
END(device_not_available)
|
||||||
|
|
||||||
/*
|
|
||||||
* Debug traps and NMI can happen at the one SYSENTER instruction
|
|
||||||
* that sets up the real kernel stack. Check here, since we can't
|
|
||||||
* allow the wrong stack to be used.
|
|
||||||
*
|
|
||||||
* "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
|
|
||||||
* already pushed 3 words if it hits on the sysenter instruction:
|
|
||||||
* eflags, cs and eip.
|
|
||||||
*
|
|
||||||
* We just load the right stack, and push the three (known) values
|
|
||||||
* by hand onto the new stack - while updating the return eip past
|
|
||||||
* the instruction that would have done it for sysenter.
|
|
||||||
*/
|
|
||||||
#define FIX_STACK(offset, ok, label) \
|
|
||||||
cmpw $__KERNEL_CS,4(%esp); \
|
|
||||||
jne ok; \
|
|
||||||
label: \
|
|
||||||
movl TSS_sysenter_sp0+offset(%esp),%esp; \
|
|
||||||
CFI_DEF_CFA esp, 0; \
|
|
||||||
CFI_UNDEFINED eip; \
|
|
||||||
pushfl; \
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4; \
|
|
||||||
pushl $__KERNEL_CS; \
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4; \
|
|
||||||
pushl $sysenter_past_esp; \
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4; \
|
|
||||||
CFI_REL_OFFSET eip, 0
|
|
||||||
|
|
||||||
KPROBE_ENTRY(debug)
|
|
||||||
RING0_INT_FRAME
|
|
||||||
cmpl $ia32_sysenter_target,(%esp)
|
|
||||||
jne debug_stack_correct
|
|
||||||
FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
|
|
||||||
debug_stack_correct:
|
|
||||||
pushl $-1 # mark this as an int
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
SAVE_ALL
|
|
||||||
TRACE_IRQS_OFF
|
|
||||||
xorl %edx,%edx # error code 0
|
|
||||||
movl %esp,%eax # pt_regs pointer
|
|
||||||
call do_debug
|
|
||||||
jmp ret_from_exception
|
|
||||||
CFI_ENDPROC
|
|
||||||
KPROBE_END(debug)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* NMI is doubly nasty. It can happen _while_ we're handling
|
|
||||||
* a debug fault, and the debug fault hasn't yet been able to
|
|
||||||
* clear up the stack. So we first check whether we got an
|
|
||||||
* NMI on the sysenter entry path, but after that we need to
|
|
||||||
* check whether we got an NMI on the debug path where the debug
|
|
||||||
* fault happened on the sysenter path.
|
|
||||||
*/
|
|
||||||
KPROBE_ENTRY(nmi)
|
|
||||||
RING0_INT_FRAME
|
|
||||||
pushl %eax
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
movl %ss, %eax
|
|
||||||
cmpw $__ESPFIX_SS, %ax
|
|
||||||
popl %eax
|
|
||||||
CFI_ADJUST_CFA_OFFSET -4
|
|
||||||
je nmi_espfix_stack
|
|
||||||
cmpl $ia32_sysenter_target,(%esp)
|
|
||||||
je nmi_stack_fixup
|
|
||||||
pushl %eax
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
movl %esp,%eax
|
|
||||||
/* Do not access memory above the end of our stack page,
|
|
||||||
* it might not exist.
|
|
||||||
*/
|
|
||||||
andl $(THREAD_SIZE-1),%eax
|
|
||||||
cmpl $(THREAD_SIZE-20),%eax
|
|
||||||
popl %eax
|
|
||||||
CFI_ADJUST_CFA_OFFSET -4
|
|
||||||
jae nmi_stack_correct
|
|
||||||
cmpl $ia32_sysenter_target,12(%esp)
|
|
||||||
je nmi_debug_stack_check
|
|
||||||
nmi_stack_correct:
|
|
||||||
/* We have a RING0_INT_FRAME here */
|
|
||||||
pushl %eax
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
SAVE_ALL
|
|
||||||
TRACE_IRQS_OFF
|
|
||||||
xorl %edx,%edx # zero error code
|
|
||||||
movl %esp,%eax # pt_regs pointer
|
|
||||||
call do_nmi
|
|
||||||
jmp restore_nocheck_notrace
|
|
||||||
CFI_ENDPROC
|
|
||||||
|
|
||||||
nmi_stack_fixup:
|
|
||||||
RING0_INT_FRAME
|
|
||||||
FIX_STACK(12,nmi_stack_correct, 1)
|
|
||||||
jmp nmi_stack_correct
|
|
||||||
|
|
||||||
nmi_debug_stack_check:
|
|
||||||
/* We have a RING0_INT_FRAME here */
|
|
||||||
cmpw $__KERNEL_CS,16(%esp)
|
|
||||||
jne nmi_stack_correct
|
|
||||||
cmpl $debug,(%esp)
|
|
||||||
jb nmi_stack_correct
|
|
||||||
cmpl $debug_esp_fix_insn,(%esp)
|
|
||||||
ja nmi_stack_correct
|
|
||||||
FIX_STACK(24,nmi_stack_correct, 1)
|
|
||||||
jmp nmi_stack_correct
|
|
||||||
|
|
||||||
nmi_espfix_stack:
|
|
||||||
/* We have a RING0_INT_FRAME here.
|
|
||||||
*
|
|
||||||
* create the pointer to lss back
|
|
||||||
*/
|
|
||||||
pushl %ss
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
pushl %esp
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
addw $4, (%esp)
|
|
||||||
/* copy the iret frame of 12 bytes */
|
|
||||||
.rept 3
|
|
||||||
pushl 16(%esp)
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
.endr
|
|
||||||
pushl %eax
|
|
||||||
CFI_ADJUST_CFA_OFFSET 4
|
|
||||||
SAVE_ALL
|
|
||||||
TRACE_IRQS_OFF
|
|
||||||
FIXUP_ESPFIX_STACK # %eax == %esp
|
|
||||||
xorl %edx,%edx # zero error code
|
|
||||||
call do_nmi
|
|
||||||
RESTORE_REGS
|
|
||||||
lss 12+4(%esp), %esp # back to espfix stack
|
|
||||||
CFI_ADJUST_CFA_OFFSET -24
|
|
||||||
jmp irq_return
|
|
||||||
CFI_ENDPROC
|
|
||||||
KPROBE_END(nmi)
|
|
||||||
|
|
||||||
#ifdef CONFIG_PARAVIRT
|
#ifdef CONFIG_PARAVIRT
|
||||||
ENTRY(native_iret)
|
ENTRY(native_iret)
|
||||||
iret
|
iret
|
||||||
@@ -916,19 +733,6 @@ ENTRY(native_irq_enable_sysexit)
 END(native_irq_enable_sysexit)
 #endif
 
-KPROBE_ENTRY(int3)
-	RING0_INT_FRAME
-	pushl $-1			# mark this as an int
-	CFI_ADJUST_CFA_OFFSET 4
-	SAVE_ALL
-	TRACE_IRQS_OFF
-	xorl %edx,%edx			# zero error code
-	movl %esp,%eax			# pt_regs pointer
-	call do_int3
-	jmp ret_from_exception
-	CFI_ENDPROC
-KPROBE_END(int3)
-
 ENTRY(overflow)
 	RING0_INT_FRAME
 	pushl $0
@@ -993,14 +797,6 @@ ENTRY(stack_segment)
 	CFI_ENDPROC
 END(stack_segment)
 
-KPROBE_ENTRY(general_protection)
-	RING0_EC_FRAME
-	pushl $do_general_protection
-	CFI_ADJUST_CFA_OFFSET 4
-	jmp error_code
-	CFI_ENDPROC
-KPROBE_END(general_protection)
-
 ENTRY(alignment_check)
 	RING0_EC_FRAME
 	pushl $do_alignment_check
@@ -1051,6 +847,7 @@ ENTRY(kernel_thread_helper)
 	push %eax
 	CFI_ADJUST_CFA_OFFSET 4
 	call do_exit
+	ud2			# padding for call trace
 	CFI_ENDPROC
 ENDPROC(kernel_thread_helper)
 
@@ -1210,3 +1007,227 @@ END(mcount)
|
|||||||
#include "syscall_table_32.S"
|
#include "syscall_table_32.S"
|
||||||
|
|
||||||
syscall_table_size=(.-sys_call_table)
|
syscall_table_size=(.-sys_call_table)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Some functions should be protected against kprobes
|
||||||
|
*/
|
||||||
|
.pushsection .kprobes.text, "ax"
|
||||||
|
|
||||||
|
ENTRY(page_fault)
|
||||||
|
RING0_EC_FRAME
|
||||||
|
pushl $do_page_fault
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
ALIGN
|
||||||
|
error_code:
|
||||||
|
/* the function address is in %fs's slot on the stack */
|
||||||
|
pushl %es
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
/*CFI_REL_OFFSET es, 0*/
|
||||||
|
pushl %ds
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
/*CFI_REL_OFFSET ds, 0*/
|
||||||
|
pushl %eax
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
CFI_REL_OFFSET eax, 0
|
||||||
|
pushl %ebp
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
CFI_REL_OFFSET ebp, 0
|
||||||
|
pushl %edi
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
CFI_REL_OFFSET edi, 0
|
||||||
|
pushl %esi
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
CFI_REL_OFFSET esi, 0
|
||||||
|
pushl %edx
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
CFI_REL_OFFSET edx, 0
|
||||||
|
pushl %ecx
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
CFI_REL_OFFSET ecx, 0
|
||||||
|
pushl %ebx
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
CFI_REL_OFFSET ebx, 0
|
||||||
|
cld
|
||||||
|
pushl %fs
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
/*CFI_REL_OFFSET fs, 0*/
|
||||||
|
movl $(__KERNEL_PERCPU), %ecx
|
||||||
|
movl %ecx, %fs
|
||||||
|
UNWIND_ESPFIX_STACK
|
||||||
|
popl %ecx
|
||||||
|
CFI_ADJUST_CFA_OFFSET -4
|
||||||
|
/*CFI_REGISTER es, ecx*/
|
||||||
|
movl PT_FS(%esp), %edi # get the function address
|
||||||
|
movl PT_ORIG_EAX(%esp), %edx # get the error code
|
||||||
|
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
|
||||||
|
mov %ecx, PT_FS(%esp)
|
||||||
|
/*CFI_REL_OFFSET fs, ES*/
|
||||||
|
movl $(__USER_DS), %ecx
|
||||||
|
movl %ecx, %ds
|
||||||
|
movl %ecx, %es
|
||||||
|
TRACE_IRQS_OFF
|
||||||
|
movl %esp,%eax # pt_regs pointer
|
||||||
|
call *%edi
|
||||||
|
jmp ret_from_exception
|
||||||
|
CFI_ENDPROC
|
||||||
|
END(page_fault)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Debug traps and NMI can happen at the one SYSENTER instruction
|
||||||
|
* that sets up the real kernel stack. Check here, since we can't
|
||||||
|
* allow the wrong stack to be used.
|
||||||
|
*
|
||||||
|
* "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
|
||||||
|
* already pushed 3 words if it hits on the sysenter instruction:
|
||||||
|
* eflags, cs and eip.
|
||||||
|
*
|
||||||
|
* We just load the right stack, and push the three (known) values
|
||||||
|
* by hand onto the new stack - while updating the return eip past
|
||||||
|
* the instruction that would have done it for sysenter.
|
||||||
|
*/
|
||||||
|
#define FIX_STACK(offset, ok, label) \
|
||||||
|
cmpw $__KERNEL_CS,4(%esp); \
|
||||||
|
jne ok; \
|
||||||
|
label: \
|
||||||
|
movl TSS_sysenter_sp0+offset(%esp),%esp; \
|
||||||
|
CFI_DEF_CFA esp, 0; \
|
||||||
|
CFI_UNDEFINED eip; \
|
||||||
|
pushfl; \
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4; \
|
||||||
|
pushl $__KERNEL_CS; \
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4; \
|
||||||
|
pushl $sysenter_past_esp; \
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4; \
|
||||||
|
CFI_REL_OFFSET eip, 0
|
||||||
|
|
||||||
|
ENTRY(debug)
|
||||||
|
RING0_INT_FRAME
|
||||||
|
cmpl $ia32_sysenter_target,(%esp)
|
||||||
|
jne debug_stack_correct
|
||||||
|
FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
|
||||||
|
debug_stack_correct:
|
||||||
|
pushl $-1 # mark this as an int
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
SAVE_ALL
|
||||||
|
TRACE_IRQS_OFF
|
||||||
|
xorl %edx,%edx # error code 0
|
||||||
|
movl %esp,%eax # pt_regs pointer
|
||||||
|
call do_debug
|
||||||
|
jmp ret_from_exception
|
||||||
|
CFI_ENDPROC
|
||||||
|
END(debug)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* NMI is doubly nasty. It can happen _while_ we're handling
|
||||||
|
* a debug fault, and the debug fault hasn't yet been able to
|
||||||
|
* clear up the stack. So we first check whether we got an
|
||||||
|
* NMI on the sysenter entry path, but after that we need to
|
||||||
|
* check whether we got an NMI on the debug path where the debug
|
||||||
|
* fault happened on the sysenter path.
|
||||||
|
*/
|
||||||
|
ENTRY(nmi)
|
||||||
|
RING0_INT_FRAME
|
||||||
|
pushl %eax
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
movl %ss, %eax
|
||||||
|
cmpw $__ESPFIX_SS, %ax
|
||||||
|
popl %eax
|
||||||
|
CFI_ADJUST_CFA_OFFSET -4
|
||||||
|
je nmi_espfix_stack
|
||||||
|
cmpl $ia32_sysenter_target,(%esp)
|
||||||
|
je nmi_stack_fixup
|
||||||
|
pushl %eax
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
movl %esp,%eax
|
||||||
|
/* Do not access memory above the end of our stack page,
|
||||||
|
* it might not exist.
|
||||||
|
*/
|
||||||
|
andl $(THREAD_SIZE-1),%eax
|
||||||
|
cmpl $(THREAD_SIZE-20),%eax
|
||||||
|
popl %eax
|
||||||
|
CFI_ADJUST_CFA_OFFSET -4
|
||||||
|
jae nmi_stack_correct
|
||||||
|
cmpl $ia32_sysenter_target,12(%esp)
|
||||||
|
je nmi_debug_stack_check
|
||||||
|
nmi_stack_correct:
|
||||||
|
/* We have a RING0_INT_FRAME here */
|
||||||
|
pushl %eax
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
SAVE_ALL
|
||||||
|
TRACE_IRQS_OFF
|
||||||
|
xorl %edx,%edx # zero error code
|
||||||
|
movl %esp,%eax # pt_regs pointer
|
||||||
|
call do_nmi
|
||||||
|
jmp restore_nocheck_notrace
|
||||||
|
CFI_ENDPROC
|
||||||
|
|
||||||
|
nmi_stack_fixup:
|
||||||
|
RING0_INT_FRAME
|
||||||
|
FIX_STACK(12,nmi_stack_correct, 1)
|
||||||
|
jmp nmi_stack_correct
|
||||||
|
|
||||||
|
nmi_debug_stack_check:
|
||||||
|
/* We have a RING0_INT_FRAME here */
|
||||||
|
cmpw $__KERNEL_CS,16(%esp)
|
||||||
|
jne nmi_stack_correct
|
||||||
|
cmpl $debug,(%esp)
|
||||||
|
jb nmi_stack_correct
|
||||||
|
cmpl $debug_esp_fix_insn,(%esp)
|
||||||
|
ja nmi_stack_correct
|
||||||
|
FIX_STACK(24,nmi_stack_correct, 1)
|
||||||
|
jmp nmi_stack_correct
|
||||||
|
|
||||||
|
nmi_espfix_stack:
|
||||||
|
/* We have a RING0_INT_FRAME here.
|
||||||
|
*
|
||||||
|
* create the pointer to lss back
|
||||||
|
*/
|
||||||
|
pushl %ss
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
pushl %esp
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
addw $4, (%esp)
|
||||||
|
/* copy the iret frame of 12 bytes */
|
||||||
|
.rept 3
|
||||||
|
pushl 16(%esp)
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
.endr
|
||||||
|
pushl %eax
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
SAVE_ALL
|
||||||
|
TRACE_IRQS_OFF
|
||||||
|
FIXUP_ESPFIX_STACK # %eax == %esp
|
||||||
|
xorl %edx,%edx # zero error code
|
||||||
|
call do_nmi
|
||||||
|
RESTORE_REGS
|
||||||
|
lss 12+4(%esp), %esp # back to espfix stack
|
||||||
|
CFI_ADJUST_CFA_OFFSET -24
|
||||||
|
jmp irq_return
|
||||||
|
CFI_ENDPROC
|
||||||
|
END(nmi)
|
||||||
|
|
||||||
|
ENTRY(int3)
|
||||||
|
RING0_INT_FRAME
|
||||||
|
pushl $-1 # mark this as an int
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
SAVE_ALL
|
||||||
|
TRACE_IRQS_OFF
|
||||||
|
xorl %edx,%edx # zero error code
|
||||||
|
movl %esp,%eax # pt_regs pointer
|
||||||
|
call do_int3
|
||||||
|
jmp ret_from_exception
|
||||||
|
CFI_ENDPROC
|
||||||
|
END(int3)
|
||||||
|
|
||||||
|
ENTRY(general_protection)
|
||||||
|
RING0_EC_FRAME
|
||||||
|
pushl $do_general_protection
|
||||||
|
CFI_ADJUST_CFA_OFFSET 4
|
||||||
|
jmp error_code
|
||||||
|
CFI_ENDPROC
|
||||||
|
END(general_protection)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* End of kprobes section
|
||||||
|
*/
|
||||||
|
.popsection
|
||||||
|
@@ -18,7 +18,6 @@
 #include <asm/idle.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
 /*
  * Probabilistic stack overflow check:
  *
@@ -28,19 +27,18 @@
  */
 static inline void stack_overflow_check(struct pt_regs *regs)
 {
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
 	u64 curbase = (u64)task_stack_page(current);
-	static unsigned long warned = -60*HZ;
-
-	if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
-	    regs->sp < curbase + sizeof(struct thread_info) + 128 &&
-	    time_after(jiffies, warned + 60*HZ)) {
-		printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
-			current->comm, curbase, regs->sp);
-		show_stack(NULL,NULL);
-		warned = jiffies;
-	}
-}
+
+	WARN_ONCE(regs->sp >= curbase &&
+		  regs->sp <= curbase + THREAD_SIZE &&
+		  regs->sp < curbase + sizeof(struct thread_info) +
+					sizeof(struct pt_regs) + 128,
+
+		  "do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
+			current->comm, curbase, regs->sp);
 #endif
+}
 
 /*
  * do_IRQ handles all normal device IRQ's (the special
@@ -60,9 +58,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 	irq_enter();
 	irq = __get_cpu_var(vector_irq)[vector];
 
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
 	stack_overflow_check(regs);
-#endif
 
 	desc = irq_to_desc(irq);
 	if (likely(desc))
@@ -129,7 +129,7 @@ void __init native_init_IRQ(void)
 	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* SYSCALL_VECTOR was reserved in trap_init. */
 		if (i != SYSCALL_VECTOR)
-			set_intr_gate(i, interrupt[i]);
+			set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
 	}
 
 
@@ -23,41 +23,6 @@
|
|||||||
#include <asm/apic.h>
|
#include <asm/apic.h>
|
||||||
#include <asm/i8259.h>
|
#include <asm/i8259.h>
|
||||||
|
|
||||||
/*
|
|
||||||
* Common place to define all x86 IRQ vectors
|
|
||||||
*
|
|
||||||
* This builds up the IRQ handler stubs using some ugly macros in irq.h
|
|
||||||
*
|
|
||||||
* These macros create the low-level assembly IRQ routines that save
|
|
||||||
* register context and call do_IRQ(). do_IRQ() then does all the
|
|
||||||
* operations that are needed to keep the AT (or SMP IOAPIC)
|
|
||||||
* interrupt-controller happy.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#define IRQ_NAME2(nr) nr##_interrupt(void)
|
|
||||||
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SMP has a few special interrupts for IPI messages
|
|
||||||
*/
|
|
||||||
|
|
||||||
#define BUILD_IRQ(nr) \
|
|
||||||
asmlinkage void IRQ_NAME(nr); \
|
|
||||||
asm("\n.text\n.p2align\n" \
|
|
||||||
"IRQ" #nr "_interrupt:\n\t" \
|
|
||||||
"push $~(" #nr ") ; " \
|
|
||||||
"jmp common_interrupt\n" \
|
|
||||||
".previous");
|
|
||||||
|
|
||||||
#define BI(x,y) \
|
|
||||||
BUILD_IRQ(x##y)
|
|
||||||
|
|
||||||
#define BUILD_16_IRQS(x) \
|
|
||||||
BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
|
|
||||||
BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
|
|
||||||
BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
|
|
||||||
BI(x,c) BI(x,d) BI(x,e) BI(x,f)
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
|
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
|
||||||
* (these are usually mapped to vectors 0x30-0x3f)
|
* (these are usually mapped to vectors 0x30-0x3f)
|
||||||
@@ -73,37 +38,6 @@
|
|||||||
*
|
*
|
||||||
* (these are usually mapped into the 0x30-0xff vector range)
|
* (these are usually mapped into the 0x30-0xff vector range)
|
||||||
*/
|
*/
|
||||||
BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
|
|
||||||
BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
|
|
||||||
BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
|
|
||||||
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
|
|
||||||
|
|
||||||
#undef BUILD_16_IRQS
|
|
||||||
#undef BI
|
|
||||||
|
|
||||||
|
|
||||||
#define IRQ(x,y) \
|
|
||||||
IRQ##x##y##_interrupt
|
|
||||||
|
|
||||||
#define IRQLIST_16(x) \
|
|
||||||
IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
|
|
||||||
IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
|
|
||||||
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
|
|
||||||
IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
|
|
||||||
|
|
||||||
/* for the irq vectors */
|
|
||||||
static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
|
|
||||||
IRQLIST_16(0x2), IRQLIST_16(0x3),
|
|
||||||
IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
|
|
||||||
IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
|
|
||||||
IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf)
|
|
||||||
};
|
|
||||||
|
|
||||||
#undef IRQ
|
|
||||||
#undef IRQLIST_16
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* IRQ2 is cascade interrupt to second interrupt controller
|
* IRQ2 is cascade interrupt to second interrupt controller
|
||||||
|
@@ -1,32 +1,37 @@
 /*
  *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
  *
  *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
  *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
+ *  2000-2002   x86-64 support by Andi Kleen
  */
-#include <linux/list.h>
-#include <linux/personality.h>
-#include <linux/binfmts.h>
-#include <linux/suspend.h>
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/signal.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/errno.h>
+
 #include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/tracehook.h>
-#include <linux/elf.h>
-#include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/ucontext.h>
-#include <asm/uaccess.h>
 #include <asm/i387.h>
 #include <asm/vdso.h>
 
+#ifdef CONFIG_X86_64
+#include <asm/proto.h>
+#include <asm/ia32_unistd.h>
+#include <asm/mce.h>
+#endif /* CONFIG_X86_64 */
+
 #include <asm/syscall.h>
 #include <asm/syscalls.h>
 
@@ -45,74 +50,6 @@
|
|||||||
# define FIX_EFLAGS __FIX_EFLAGS
|
# define FIX_EFLAGS __FIX_EFLAGS
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
|
||||||
* Atomically swap in the new signal mask, and wait for a signal.
|
|
||||||
*/
|
|
||||||
asmlinkage int
|
|
||||||
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
|
|
||||||
{
|
|
||||||
mask &= _BLOCKABLE;
|
|
||||||
spin_lock_irq(¤t->sighand->siglock);
|
|
||||||
current->saved_sigmask = current->blocked;
|
|
||||||
siginitset(¤t->blocked, mask);
|
|
||||||
recalc_sigpending();
|
|
||||||
spin_unlock_irq(¤t->sighand->siglock);
|
|
||||||
|
|
||||||
current->state = TASK_INTERRUPTIBLE;
|
|
||||||
schedule();
|
|
||||||
set_restore_sigmask();
|
|
||||||
|
|
||||||
return -ERESTARTNOHAND;
|
|
||||||
}
|
|
||||||
|
|
||||||
asmlinkage int
|
|
||||||
sys_sigaction(int sig, const struct old_sigaction __user *act,
|
|
||||||
struct old_sigaction __user *oact)
|
|
||||||
{
|
|
||||||
struct k_sigaction new_ka, old_ka;
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
if (act) {
|
|
||||||
old_sigset_t mask;
|
|
||||||
|
|
||||||
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
|
|
||||||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
|
|
||||||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
|
|
||||||
return -EFAULT;
|
|
||||||
|
|
||||||
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
|
|
||||||
__get_user(mask, &act->sa_mask);
|
|
||||||
siginitset(&new_ka.sa.sa_mask, mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
|
|
||||||
|
|
||||||
if (!ret && oact) {
|
|
||||||
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
|
|
||||||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
|
|
||||||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
|
|
||||||
return -EFAULT;
|
|
||||||
|
|
||||||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
|
|
||||||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
asmlinkage int sys_sigaltstack(unsigned long bx)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* This is needed to make gcc realize it doesn't own the
|
|
||||||
* "struct pt_regs"
|
|
||||||
*/
|
|
||||||
struct pt_regs *regs = (struct pt_regs *)&bx;
|
|
||||||
const stack_t __user *uss = (const stack_t __user *)bx;
|
|
||||||
stack_t __user *uoss = (stack_t __user *)regs->cx;
|
|
||||||
|
|
||||||
return do_sigaltstack(uss, uoss, regs->sp);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define COPY(x) { \
|
#define COPY(x) { \
|
||||||
err |= __get_user(regs->x, &sc->x); \
|
err |= __get_user(regs->x, &sc->x); \
|
||||||
}
|
}
|
||||||
@@ -123,7 +60,7 @@ asmlinkage int sys_sigaltstack(unsigned long bx)
 		regs->seg = tmp;		\
 }
 
-#define COPY_SEG_STRICT(seg)	{		\
+#define COPY_SEG_CPL3(seg)	{		\
 		unsigned short tmp;		\
 		err |= __get_user(tmp, &sc->seg); \
 		regs->seg = tmp | 3;		\
@@ -135,9 +72,6 @@ asmlinkage int sys_sigaltstack(unsigned long bx)
 		loadsegment(seg, tmp);		\
 }
 
-/*
- * Do a signal return; undo the signal stack.
- */
 static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 		   unsigned long *pax)
@@ -149,14 +83,36 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 	/* Always make any pending restarted system calls return -EINTR */
 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
+#ifdef CONFIG_X86_32
 	GET_SEG(gs);
 	COPY_SEG(fs);
 	COPY_SEG(es);
 	COPY_SEG(ds);
+#endif /* CONFIG_X86_32 */
+
 	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
 	COPY(dx); COPY(cx); COPY(ip);
-	COPY_SEG_STRICT(cs);
-	COPY_SEG_STRICT(ss);
+
+#ifdef CONFIG_X86_64
+	COPY(r8);
+	COPY(r9);
+	COPY(r10);
+	COPY(r11);
+	COPY(r12);
+	COPY(r13);
+	COPY(r14);
+	COPY(r15);
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_32
+	COPY_SEG_CPL3(cs);
+	COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+	/* Kernel saves and restores only the CS segment register on signals,
+	 * which is the bare minimum needed to allow mixed 32/64-bit code.
+	 * App's signal handler can save/restore other segments if needed. */
+	COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
 
 	err |= __get_user(tmpflags, &sc->flags);
 	regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -169,102 +125,24 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
|
|
||||||
{
|
|
||||||
struct sigframe __user *frame;
|
|
||||||
struct pt_regs *regs;
|
|
||||||
unsigned long ax;
|
|
||||||
sigset_t set;
|
|
||||||
|
|
||||||
regs = (struct pt_regs *) &__unused;
|
|
||||||
frame = (struct sigframe __user *)(regs->sp - 8);
|
|
||||||
|
|
||||||
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
|
||||||
goto badframe;
|
|
||||||
if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
|
|
||||||
&& __copy_from_user(&set.sig[1], &frame->extramask,
|
|
||||||
sizeof(frame->extramask))))
|
|
||||||
goto badframe;
|
|
||||||
|
|
||||||
sigdelsetmask(&set, ~_BLOCKABLE);
|
|
||||||
spin_lock_irq(¤t->sighand->siglock);
|
|
||||||
current->blocked = set;
|
|
||||||
recalc_sigpending();
|
|
||||||
spin_unlock_irq(¤t->sighand->siglock);
|
|
||||||
|
|
||||||
if (restore_sigcontext(regs, &frame->sc, &ax))
|
|
||||||
goto badframe;
|
|
||||||
return ax;
|
|
||||||
|
|
||||||
badframe:
|
|
||||||
if (show_unhandled_signals && printk_ratelimit()) {
|
|
||||||
printk("%s%s[%d] bad frame in sigreturn frame:"
|
|
||||||
"%p ip:%lx sp:%lx oeax:%lx",
|
|
||||||
task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
|
|
||||||
current->comm, task_pid_nr(current), frame, regs->ip,
|
|
||||||
regs->sp, regs->orig_ax);
|
|
||||||
print_vma_addr(" in ", regs->ip);
|
|
||||||
printk(KERN_CONT "\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
force_sig(SIGSEGV, current);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static long do_rt_sigreturn(struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
struct rt_sigframe __user *frame;
|
|
||||||
unsigned long ax;
|
|
||||||
sigset_t set;
|
|
||||||
|
|
||||||
frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
|
|
||||||
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
|
||||||
goto badframe;
|
|
||||||
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
|
|
||||||
goto badframe;
|
|
||||||
|
|
||||||
sigdelsetmask(&set, ~_BLOCKABLE);
|
|
||||||
spin_lock_irq(¤t->sighand->siglock);
|
|
||||||
current->blocked = set;
|
|
||||||
recalc_sigpending();
|
|
||||||
spin_unlock_irq(¤t->sighand->siglock);
|
|
||||||
|
|
||||||
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
|
|
||||||
goto badframe;
|
|
||||||
|
|
||||||
if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
|
|
||||||
goto badframe;
|
|
||||||
|
|
||||||
return ax;
|
|
||||||
|
|
||||||
badframe:
|
|
||||||
signal_fault(regs, frame, "rt_sigreturn");
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
asmlinkage int sys_rt_sigreturn(unsigned long __unused)
|
|
||||||
{
|
|
||||||
struct pt_regs *regs = (struct pt_regs *)&__unused;
|
|
||||||
|
|
||||||
return do_rt_sigreturn(regs);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set up a signal frame.
|
|
||||||
*/
|
|
||||||
static int
|
static int
|
||||||
setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
|
setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
|
||||||
struct pt_regs *regs, unsigned long mask)
|
struct pt_regs *regs, unsigned long mask)
|
||||||
{
|
{
|
||||||
int tmp, err = 0;
|
int err = 0;
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
{
|
||||||
|
unsigned int tmp;
|
||||||
|
|
||||||
|
savesegment(gs, tmp);
|
||||||
|
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
|
||||||
|
}
|
||||||
err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
|
err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
|
||||||
savesegment(gs, tmp);
|
|
||||||
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
|
|
||||||
|
|
||||||
err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
|
err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
|
||||||
err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
|
err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
err |= __put_user(regs->di, &sc->di);
|
err |= __put_user(regs->di, &sc->di);
|
||||||
err |= __put_user(regs->si, &sc->si);
|
err |= __put_user(regs->si, &sc->si);
|
||||||
err |= __put_user(regs->bp, &sc->bp);
|
err |= __put_user(regs->bp, &sc->bp);
|
||||||
@@ -273,19 +151,33 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
|
|||||||
err |= __put_user(regs->dx, &sc->dx);
|
err |= __put_user(regs->dx, &sc->dx);
|
||||||
err |= __put_user(regs->cx, &sc->cx);
|
err |= __put_user(regs->cx, &sc->cx);
|
||||||
err |= __put_user(regs->ax, &sc->ax);
|
err |= __put_user(regs->ax, &sc->ax);
|
||||||
|
#ifdef CONFIG_X86_64
|
||||||
|
err |= __put_user(regs->r8, &sc->r8);
|
||||||
|
err |= __put_user(regs->r9, &sc->r9);
|
||||||
|
err |= __put_user(regs->r10, &sc->r10);
|
||||||
|
err |= __put_user(regs->r11, &sc->r11);
|
||||||
|
err |= __put_user(regs->r12, &sc->r12);
|
||||||
|
err |= __put_user(regs->r13, &sc->r13);
|
||||||
|
err |= __put_user(regs->r14, &sc->r14);
|
||||||
|
err |= __put_user(regs->r15, &sc->r15);
|
||||||
|
#endif /* CONFIG_X86_64 */
|
||||||
|
|
||||||
err |= __put_user(current->thread.trap_no, &sc->trapno);
|
err |= __put_user(current->thread.trap_no, &sc->trapno);
|
||||||
err |= __put_user(current->thread.error_code, &sc->err);
|
err |= __put_user(current->thread.error_code, &sc->err);
|
||||||
err |= __put_user(regs->ip, &sc->ip);
|
err |= __put_user(regs->ip, &sc->ip);
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
|
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
|
||||||
err |= __put_user(regs->flags, &sc->flags);
|
err |= __put_user(regs->flags, &sc->flags);
|
||||||
err |= __put_user(regs->sp, &sc->sp_at_signal);
|
err |= __put_user(regs->sp, &sc->sp_at_signal);
|
||||||
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
|
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
|
||||||
|
#else /* !CONFIG_X86_32 */
|
||||||
|
err |= __put_user(regs->flags, &sc->flags);
|
||||||
|
err |= __put_user(regs->cs, &sc->cs);
|
||||||
|
err |= __put_user(0, &sc->gs);
|
||||||
|
err |= __put_user(0, &sc->fs);
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
tmp = save_i387_xstate(fpstate);
|
err |= __put_user(fpstate, &sc->fpstate);
|
||||||
if (tmp < 0)
|
|
||||||
err = 1;
|
|
||||||
else
|
|
||||||
err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
|
|
||||||
|
|
||||||
/* non-iBCS2 extensions.. */
|
/* non-iBCS2 extensions.. */
|
||||||
err |= __put_user(mask, &sc->oldmask);
|
err |= __put_user(mask, &sc->oldmask);
|
||||||
@@ -294,6 +186,32 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set up a signal frame.
|
||||||
|
*/
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
static const struct {
|
||||||
|
u16 poplmovl;
|
||||||
|
u32 val;
|
||||||
|
u16 int80;
|
||||||
|
} __attribute__((packed)) retcode = {
|
||||||
|
0xb858, /* popl %eax; movl $..., %eax */
|
||||||
|
__NR_sigreturn,
|
||||||
|
0x80cd, /* int $0x80 */
|
||||||
|
};
|
||||||
|
|
||||||
|
static const struct {
|
||||||
|
u8 movl;
|
||||||
|
u32 val;
|
||||||
|
u16 int80;
|
||||||
|
u8 pad;
|
||||||
|
} __attribute__((packed)) rt_retcode = {
|
||||||
|
0xb8, /* movl $..., %eax */
|
||||||
|
__NR_rt_sigreturn,
|
||||||
|
0x80cd, /* int $0x80 */
|
||||||
|
0
|
||||||
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Determine which stack to use..
|
* Determine which stack to use..
|
||||||
*/
|
*/
|
||||||
@@ -328,6 +246,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
|
|||||||
if (used_math()) {
|
if (used_math()) {
|
||||||
sp = sp - sig_xstate_size;
|
sp = sp - sig_xstate_size;
|
||||||
*fpstate = (struct _fpstate *) sp;
|
*fpstate = (struct _fpstate *) sp;
|
||||||
|
if (save_i387_xstate(*fpstate) < 0)
|
||||||
|
return (void __user *)-1L;
|
||||||
}
|
}
|
||||||
|
|
||||||
sp -= frame_size;
|
sp -= frame_size;
|
||||||
@@ -383,9 +303,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
|
|||||||
* reasons and because gdb uses it as a signature to notice
|
* reasons and because gdb uses it as a signature to notice
|
||||||
* signal handler stack frames.
|
* signal handler stack frames.
|
||||||
*/
|
*/
|
||||||
err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
|
err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
|
||||||
err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
|
|
||||||
err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
|
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
@@ -454,9 +372,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||||||
* reasons and because gdb uses it as a signature to notice
|
* reasons and because gdb uses it as a signature to notice
|
||||||
* signal handler stack frames.
|
* signal handler stack frames.
|
||||||
*/
|
*/
|
||||||
err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
|
err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
|
||||||
err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
|
|
||||||
err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
|
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
@@ -475,23 +391,298 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
#else /* !CONFIG_X86_32 */
|
||||||
|
/*
|
||||||
|
* Determine which stack to use..
|
||||||
|
*/
|
||||||
|
static void __user *
|
||||||
|
get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size)
|
||||||
|
{
|
||||||
|
/* Default to using normal stack - redzone*/
|
||||||
|
sp -= 128;
|
||||||
|
|
||||||
|
/* This is the X/Open sanctioned signal stack switching. */
|
||||||
|
if (ka->sa.sa_flags & SA_ONSTACK) {
|
||||||
|
if (sas_ss_flags(sp) == 0)
|
||||||
|
sp = current->sas_ss_sp + current->sas_ss_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (void __user *)round_down(sp - size, 64);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||||
|
sigset_t *set, struct pt_regs *regs)
|
||||||
|
{
|
||||||
|
struct rt_sigframe __user *frame;
|
||||||
|
void __user *fp = NULL;
|
||||||
|
int err = 0;
|
||||||
|
struct task_struct *me = current;
|
||||||
|
|
||||||
|
if (used_math()) {
|
||||||
|
fp = get_stack(ka, regs->sp, sig_xstate_size);
|
||||||
|
frame = (void __user *)round_down(
|
||||||
|
(unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
|
||||||
|
|
||||||
|
if (save_i387_xstate(fp) < 0)
|
||||||
|
return -EFAULT;
|
||||||
|
} else
|
||||||
|
frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8;
|
||||||
|
|
||||||
|
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
|
||||||
|
return -EFAULT;
|
||||||
|
|
||||||
|
if (ka->sa.sa_flags & SA_SIGINFO) {
|
||||||
|
if (copy_siginfo_to_user(&frame->info, info))
|
||||||
|
return -EFAULT;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Create the ucontext. */
|
||||||
|
if (cpu_has_xsave)
|
||||||
|
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
|
||||||
|
else
|
||||||
|
err |= __put_user(0, &frame->uc.uc_flags);
|
||||||
|
err |= __put_user(0, &frame->uc.uc_link);
|
||||||
|
err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
|
||||||
|
err |= __put_user(sas_ss_flags(regs->sp),
|
||||||
|
&frame->uc.uc_stack.ss_flags);
|
||||||
|
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
|
||||||
|
err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
|
||||||
|
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
||||||
|
|
||||||
|
/* Set up to return from userspace. If provided, use a stub
|
||||||
|
already in userspace. */
|
||||||
|
/* x86-64 should always use SA_RESTORER. */
|
||||||
|
if (ka->sa.sa_flags & SA_RESTORER) {
|
||||||
|
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
|
||||||
|
} else {
|
||||||
|
/* could use a vstub here */
|
||||||
|
return -EFAULT;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (err)
|
||||||
|
return -EFAULT;
|
||||||
|
|
||||||
|
/* Set up registers for signal handler */
|
||||||
|
regs->di = sig;
|
||||||
|
/* In case the signal handler was declared without prototypes */
|
||||||
|
regs->ax = 0;
|
||||||
|
|
||||||
|
/* This also works for non SA_SIGINFO handlers because they expect the
|
||||||
|
next argument after the signal number on the stack. */
|
||||||
|
regs->si = (unsigned long)&frame->info;
|
||||||
|
regs->dx = (unsigned long)&frame->uc;
|
||||||
|
regs->ip = (unsigned long) ka->sa.sa_handler;
|
||||||
|
|
||||||
|
regs->sp = (unsigned long)frame;
|
||||||
|
|
||||||
|
/* Set up the CS register to run signal handlers in 64-bit mode,
|
||||||
|
even if the handler happens to be interrupting 32-bit code. */
|
||||||
|
regs->cs = __USER_CS;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
/*
|
||||||
|
* Atomically swap in the new signal mask, and wait for a signal.
|
||||||
|
*/
|
||||||
|
asmlinkage int
|
||||||
|
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
|
||||||
|
{
|
||||||
|
mask &= _BLOCKABLE;
|
||||||
|
spin_lock_irq(¤t->sighand->siglock);
|
||||||
|
current->saved_sigmask = current->blocked;
|
||||||
|
siginitset(¤t->blocked, mask);
|
||||||
|
recalc_sigpending();
|
||||||
|
spin_unlock_irq(¤t->sighand->siglock);
|
||||||
|
|
||||||
|
current->state = TASK_INTERRUPTIBLE;
|
||||||
|
schedule();
|
||||||
|
set_restore_sigmask();
|
||||||
|
|
||||||
|
return -ERESTARTNOHAND;
|
||||||
|
}
|
||||||
|
|
||||||
|
asmlinkage int
|
||||||
|
sys_sigaction(int sig, const struct old_sigaction __user *act,
|
||||||
|
struct old_sigaction __user *oact)
|
||||||
|
{
|
||||||
|
struct k_sigaction new_ka, old_ka;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (act) {
|
||||||
|
old_sigset_t mask;
|
||||||
|
|
||||||
|
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
|
||||||
|
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
|
||||||
|
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
|
||||||
|
return -EFAULT;
|
||||||
|
|
||||||
|
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
|
||||||
|
__get_user(mask, &act->sa_mask);
|
||||||
|
siginitset(&new_ka.sa.sa_mask, mask);
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
|
||||||
|
|
||||||
|
if (!ret && oact) {
|
||||||
|
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
|
||||||
|
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
|
||||||
|
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
|
||||||
|
return -EFAULT;
|
||||||
|
|
||||||
|
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
|
||||||
|
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
asmlinkage int sys_sigaltstack(unsigned long bx)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* This is needed to make gcc realize it doesn't own the
|
||||||
|
* "struct pt_regs"
|
||||||
|
*/
|
||||||
|
struct pt_regs *regs = (struct pt_regs *)&bx;
|
||||||
|
const stack_t __user *uss = (const stack_t __user *)bx;
|
||||||
|
stack_t __user *uoss = (stack_t __user *)regs->cx;
|
||||||
|
|
||||||
|
return do_sigaltstack(uss, uoss, regs->sp);
|
||||||
|
}
|
||||||
|
#else /* !CONFIG_X86_32 */
|
||||||
|
asmlinkage long
|
||||||
|
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
|
||||||
|
struct pt_regs *regs)
|
||||||
|
{
|
||||||
|
return do_sigaltstack(uss, uoss, regs->sp);
|
||||||
|
}
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Do a signal return; undo the signal stack.
|
||||||
|
*/
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
|
||||||
|
{
|
||||||
|
struct sigframe __user *frame;
|
||||||
|
struct pt_regs *regs;
|
||||||
|
unsigned long ax;
|
||||||
|
sigset_t set;
|
||||||
|
|
||||||
|
regs = (struct pt_regs *) &__unused;
|
||||||
|
frame = (struct sigframe __user *)(regs->sp - 8);
|
||||||
|
|
||||||
|
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
||||||
|
goto badframe;
|
||||||
|
if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
|
||||||
|
&& __copy_from_user(&set.sig[1], &frame->extramask,
|
||||||
|
sizeof(frame->extramask))))
|
||||||
|
goto badframe;
|
||||||
|
|
||||||
|
sigdelsetmask(&set, ~_BLOCKABLE);
|
||||||
|
spin_lock_irq(¤t->sighand->siglock);
|
||||||
|
current->blocked = set;
|
||||||
|
recalc_sigpending();
|
||||||
|
spin_unlock_irq(¤t->sighand->siglock);
|
||||||
|
|
||||||
|
if (restore_sigcontext(regs, &frame->sc, &ax))
|
||||||
|
goto badframe;
|
||||||
|
return ax;
|
||||||
|
|
||||||
|
badframe:
|
||||||
|
if (show_unhandled_signals && printk_ratelimit()) {
|
||||||
|
printk("%s%s[%d] bad frame in sigreturn frame:"
|
||||||
|
"%p ip:%lx sp:%lx oeax:%lx",
|
||||||
|
task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
|
||||||
|
current->comm, task_pid_nr(current), frame, regs->ip,
|
||||||
|
regs->sp, regs->orig_ax);
|
||||||
|
print_vma_addr(" in ", regs->ip);
|
||||||
|
printk(KERN_CONT "\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
force_sig(SIGSEGV, current);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
|
static long do_rt_sigreturn(struct pt_regs *regs)
|
||||||
|
{
|
||||||
|
struct rt_sigframe __user *frame;
|
||||||
|
unsigned long ax;
|
||||||
|
sigset_t set;
|
||||||
|
|
||||||
|
frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
|
||||||
|
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
||||||
|
goto badframe;
|
||||||
|
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
|
||||||
|
goto badframe;
|
||||||
|
|
||||||
|
sigdelsetmask(&set, ~_BLOCKABLE);
|
||||||
|
spin_lock_irq(¤t->sighand->siglock);
|
||||||
|
current->blocked = set;
|
||||||
|
recalc_sigpending();
|
||||||
|
spin_unlock_irq(¤t->sighand->siglock);
|
||||||
|
|
||||||
|
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
|
||||||
|
goto badframe;
|
||||||
|
|
||||||
|
if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
|
||||||
|
goto badframe;
|
||||||
|
|
||||||
|
return ax;
|
||||||
|
|
||||||
|
badframe:
|
||||||
|
signal_fault(regs, frame, "rt_sigreturn");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
|
||||||
|
{
|
||||||
|
return do_rt_sigreturn(®s);
|
||||||
|
}
|
||||||
|
#else /* !CONFIG_X86_32 */
|
||||||
|
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
|
||||||
|
{
|
||||||
|
return do_rt_sigreturn(regs);
|
||||||
|
}
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* OK, we're invoking a handler:
|
* OK, we're invoking a handler:
|
||||||
*/
|
*/
|
||||||
static int signr_convert(int sig)
|
static int signr_convert(int sig)
|
||||||
{
|
{
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
struct thread_info *info = current_thread_info();
|
struct thread_info *info = current_thread_info();
|
||||||
|
|
||||||
if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32)
|
if (info->exec_domain && info->exec_domain->signal_invmap && sig < 32)
|
||||||
return info->exec_domain->signal_invmap[sig];
|
return info->exec_domain->signal_invmap[sig];
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
return sig;
|
return sig;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
|
||||||
#define is_ia32 1
|
#define is_ia32 1
|
||||||
#define ia32_setup_frame __setup_frame
|
#define ia32_setup_frame __setup_frame
|
||||||
#define ia32_setup_rt_frame __setup_rt_frame
|
#define ia32_setup_rt_frame __setup_rt_frame
|
||||||
|
|
||||||
|
#else /* !CONFIG_X86_32 */
|
||||||
|
|
||||||
|
#ifdef CONFIG_IA32_EMULATION
|
||||||
|
#define is_ia32 test_thread_flag(TIF_IA32)
|
||||||
|
#else /* !CONFIG_IA32_EMULATION */
|
||||||
|
#define is_ia32 0
|
||||||
|
#endif /* CONFIG_IA32_EMULATION */
|
||||||
|
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
static int
|
static int
|
||||||
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
||||||
sigset_t *set, struct pt_regs *regs)
|
sigset_t *set, struct pt_regs *regs)
|
||||||
@@ -592,7 +783,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
#define NR_restart_syscall __NR_restart_syscall
|
#define NR_restart_syscall __NR_restart_syscall
|
||||||
|
#else /* !CONFIG_X86_32 */
|
||||||
|
#define NR_restart_syscall \
|
||||||
|
test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Note that 'init' is a special process: it doesn't get signals it doesn't
|
* Note that 'init' is a special process: it doesn't get signals it doesn't
|
||||||
* want to handle. Thus you cannot kill init even with a SIGKILL even by
|
* want to handle. Thus you cannot kill init even with a SIGKILL even by
|
@@ -1,516 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
|
||||||
* Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
|
|
||||||
*
|
|
||||||
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
|
|
||||||
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
|
|
||||||
* 2000-2002 x86-64 support by Andi Kleen
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <linux/sched.h>
|
|
||||||
#include <linux/mm.h>
|
|
||||||
#include <linux/smp.h>
|
|
||||||
#include <linux/kernel.h>
|
|
||||||
#include <linux/signal.h>
|
|
||||||
#include <linux/errno.h>
|
|
||||||
#include <linux/wait.h>
|
|
||||||
#include <linux/ptrace.h>
|
|
||||||
#include <linux/tracehook.h>
|
|
||||||
#include <linux/unistd.h>
|
|
||||||
#include <linux/stddef.h>
|
|
||||||
#include <linux/personality.h>
|
|
||||||
#include <linux/compiler.h>
|
|
||||||
#include <linux/uaccess.h>
|
|
||||||
|
|
||||||
#include <asm/processor.h>
|
|
||||||
#include <asm/ucontext.h>
|
|
||||||
#include <asm/i387.h>
|
|
||||||
#include <asm/proto.h>
|
|
||||||
#include <asm/ia32_unistd.h>
|
|
||||||
#include <asm/mce.h>
|
|
||||||
#include <asm/syscall.h>
|
|
||||||
#include <asm/syscalls.h>
|
|
||||||
#include "sigframe.h"
|
|
||||||
|
|
||||||
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
|
|
||||||
|
|
||||||
#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
|
|
||||||
X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
|
|
||||||
X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
|
|
||||||
X86_EFLAGS_CF)
|
|
||||||
|
|
||||||
#ifdef CONFIG_X86_32
|
|
||||||
# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF)
|
|
||||||
#else
|
|
||||||
# define FIX_EFLAGS __FIX_EFLAGS
|
|
||||||
#endif
|
|
||||||
|
|
||||||
asmlinkage long
|
|
||||||
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
|
|
||||||
struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
return do_sigaltstack(uss, uoss, regs->sp);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define COPY(x) { \
|
|
||||||
err |= __get_user(regs->x, &sc->x); \
|
|
||||||
}
|
|
||||||
|
|
||||||
#define COPY_SEG_STRICT(seg) { \
|
|
||||||
unsigned short tmp; \
|
|
||||||
err |= __get_user(tmp, &sc->seg); \
|
|
||||||
regs->seg = tmp | 3; \
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Do a signal return; undo the signal stack.
|
|
||||||
*/
|
|
||||||
static int
|
|
||||||
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
|
|
||||||
unsigned long *pax)
|
|
||||||
{
|
|
||||||
void __user *buf;
|
|
||||||
unsigned int tmpflags;
|
|
||||||
unsigned int err = 0;
|
|
||||||
|
|
||||||
/* Always make any pending restarted system calls return -EINTR */
|
|
||||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
|
||||||
|
|
||||||
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
|
|
||||||
COPY(dx); COPY(cx); COPY(ip);
|
|
||||||
COPY(r8);
|
|
||||||
COPY(r9);
|
|
||||||
COPY(r10);
|
|
||||||
COPY(r11);
|
|
||||||
COPY(r12);
|
|
||||||
COPY(r13);
|
|
||||||
COPY(r14);
|
|
||||||
COPY(r15);
|
|
||||||
|
|
||||||
/* Kernel saves and restores only the CS segment register on signals,
|
|
||||||
* which is the bare minimum needed to allow mixed 32/64-bit code.
|
|
||||||
* App's signal handler can save/restore other segments if needed. */
|
|
||||||
COPY_SEG_STRICT(cs);
|
|
||||||
|
|
||||||
err |= __get_user(tmpflags, &sc->flags);
|
|
||||||
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
|
|
||||||
regs->orig_ax = -1; /* disable syscall checks */
|
|
||||||
|
|
||||||
err |= __get_user(buf, &sc->fpstate);
|
|
||||||
err |= restore_i387_xstate(buf);
|
|
||||||
|
|
||||||
err |= __get_user(*pax, &sc->ax);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
static long do_rt_sigreturn(struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
struct rt_sigframe __user *frame;
|
|
||||||
unsigned long ax;
|
|
||||||
sigset_t set;
|
|
||||||
|
|
||||||
frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
|
|
||||||
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
|
||||||
goto badframe;
|
|
||||||
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
|
|
||||||
goto badframe;
|
|
||||||
|
|
||||||
sigdelsetmask(&set, ~_BLOCKABLE);
|
|
||||||
spin_lock_irq(¤t->sighand->siglock);
|
|
||||||
current->blocked = set;
|
|
||||||
recalc_sigpending();
|
|
||||||
spin_unlock_irq(¤t->sighand->siglock);
|
|
||||||
|
|
||||||
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
|
|
||||||
goto badframe;
|
|
||||||
|
|
||||||
if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
|
|
||||||
goto badframe;
|
|
||||||
|
|
||||||
return ax;
|
|
||||||
|
|
||||||
badframe:
|
|
||||||
signal_fault(regs, frame, "rt_sigreturn");
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
return do_rt_sigreturn(regs);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Set up a signal frame.
|
|
||||||
*/
|
|
||||||
|
|
||||||
static inline int
|
|
||||||
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
|
|
||||||
unsigned long mask, struct task_struct *me)
|
|
||||||
{
|
|
||||||
int err = 0;
|
|
||||||
|
|
||||||
err |= __put_user(regs->cs, &sc->cs);
|
|
||||||
err |= __put_user(0, &sc->gs);
|
|
||||||
err |= __put_user(0, &sc->fs);
|
|
||||||
|
|
||||||
err |= __put_user(regs->di, &sc->di);
|
|
||||||
err |= __put_user(regs->si, &sc->si);
|
|
||||||
err |= __put_user(regs->bp, &sc->bp);
|
|
||||||
err |= __put_user(regs->sp, &sc->sp);
|
|
||||||
err |= __put_user(regs->bx, &sc->bx);
|
|
||||||
err |= __put_user(regs->dx, &sc->dx);
|
|
||||||
err |= __put_user(regs->cx, &sc->cx);
|
|
||||||
err |= __put_user(regs->ax, &sc->ax);
|
|
||||||
err |= __put_user(regs->r8, &sc->r8);
|
|
||||||
err |= __put_user(regs->r9, &sc->r9);
|
|
||||||
err |= __put_user(regs->r10, &sc->r10);
|
|
||||||
err |= __put_user(regs->r11, &sc->r11);
|
|
||||||
err |= __put_user(regs->r12, &sc->r12);
|
|
||||||
err |= __put_user(regs->r13, &sc->r13);
|
|
||||||
err |= __put_user(regs->r14, &sc->r14);
|
|
||||||
err |= __put_user(regs->r15, &sc->r15);
|
|
||||||
err |= __put_user(me->thread.trap_no, &sc->trapno);
|
|
||||||
err |= __put_user(me->thread.error_code, &sc->err);
|
|
||||||
err |= __put_user(regs->ip, &sc->ip);
|
|
||||||
err |= __put_user(regs->flags, &sc->flags);
|
|
||||||
err |= __put_user(mask, &sc->oldmask);
|
|
||||||
err |= __put_user(me->thread.cr2, &sc->cr2);
|
|
||||||
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Determine which stack to use..
|
|
||||||
*/
|
|
||||||
|
|
||||||
static void __user *
|
|
||||||
get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
|
|
||||||
{
|
|
||||||
unsigned long sp;
|
|
||||||
|
|
||||||
/* Default to using normal stack - redzone*/
|
|
||||||
sp = regs->sp - 128;
|
|
||||||
|
|
||||||
/* This is the X/Open sanctioned signal stack switching. */
|
|
||||||
if (ka->sa.sa_flags & SA_ONSTACK) {
|
|
||||||
if (sas_ss_flags(sp) == 0)
|
|
||||||
sp = current->sas_ss_sp + current->sas_ss_size;
|
|
||||||
}
|
|
||||||
|
|
||||||
return (void __user *)round_down(sp - size, 64);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|
||||||
sigset_t *set, struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
struct rt_sigframe __user *frame;
|
|
||||||
void __user *fp = NULL;
|
|
||||||
int err = 0;
|
|
||||||
struct task_struct *me = current;
|
|
||||||
|
|
||||||
if (used_math()) {
|
|
||||||
fp = get_stack(ka, regs, sig_xstate_size);
|
|
||||||
frame = (void __user *)round_down(
|
|
||||||
(unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
|
|
||||||
|
|
||||||
if (save_i387_xstate(fp) < 0)
|
|
||||||
return -EFAULT;
|
|
||||||
} else
|
|
||||||
frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
|
|
||||||
|
|
||||||
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
|
|
||||||
return -EFAULT;
|
|
||||||
|
|
||||||
if (ka->sa.sa_flags & SA_SIGINFO) {
|
|
||||||
if (copy_siginfo_to_user(&frame->info, info))
|
|
||||||
return -EFAULT;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Create the ucontext. */
|
|
||||||
if (cpu_has_xsave)
|
|
||||||
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
|
|
||||||
else
|
|
||||||
err |= __put_user(0, &frame->uc.uc_flags);
|
|
||||||
err |= __put_user(0, &frame->uc.uc_link);
|
|
||||||
err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
|
|
||||||
err |= __put_user(sas_ss_flags(regs->sp),
|
|
||||||
&frame->uc.uc_stack.ss_flags);
|
|
||||||
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
|
|
||||||
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
|
|
||||||
err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
|
|
||||||
if (sizeof(*set) == 16) {
|
|
||||||
__put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
|
|
||||||
__put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
|
|
||||||
} else
|
|
||||||
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
|
|
||||||
|
|
||||||
/* Set up to return from userspace. If provided, use a stub
|
|
||||||
already in userspace. */
|
|
||||||
/* x86-64 should always use SA_RESTORER. */
|
|
||||||
if (ka->sa.sa_flags & SA_RESTORER) {
|
|
||||||
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
|
|
||||||
} else {
|
|
||||||
/* could use a vstub here */
|
|
||||||
return -EFAULT;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (err)
|
|
||||||
return -EFAULT;
|
|
||||||
|
|
||||||
/* Set up registers for signal handler */
|
|
||||||
regs->di = sig;
|
|
||||||
/* In case the signal handler was declared without prototypes */
|
|
||||||
regs->ax = 0;
|
|
||||||
|
|
||||||
/* This also works for non SA_SIGINFO handlers because they expect the
|
|
||||||
next argument after the signal number on the stack. */
|
|
||||||
regs->si = (unsigned long)&frame->info;
|
|
||||||
regs->dx = (unsigned long)&frame->uc;
|
|
||||||
regs->ip = (unsigned long) ka->sa.sa_handler;
|
|
||||||
|
|
||||||
regs->sp = (unsigned long)frame;
|
|
||||||
|
|
||||||
/* Set up the CS register to run signal handlers in 64-bit mode,
|
|
||||||
even if the handler happens to be interrupting 32-bit code. */
|
|
||||||
regs->cs = __USER_CS;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* OK, we're invoking a handler
|
|
||||||
*/
|
|
||||||
static int signr_convert(int sig)
|
|
||||||
{
|
|
||||||
return sig;
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef CONFIG_IA32_EMULATION
|
|
||||||
#define is_ia32 test_thread_flag(TIF_IA32)
|
|
||||||
#else
|
|
||||||
#define is_ia32 0
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static int
|
|
||||||
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|
||||||
sigset_t *set, struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
int usig = signr_convert(sig);
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
/* Set up the stack frame */
|
|
||||||
if (is_ia32) {
|
|
||||||
if (ka->sa.sa_flags & SA_SIGINFO)
|
|
||||||
ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
|
|
||||||
else
|
|
||||||
ret = ia32_setup_frame(usig, ka, set, regs);
|
|
||||||
} else
|
|
||||||
ret = __setup_rt_frame(sig, ka, info, set, regs);
|
|
||||||
|
|
||||||
if (ret) {
|
|
||||||
force_sigsegv(sig, current);
|
|
||||||
return -EFAULT;
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int
|
|
||||||
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
|
|
||||||
sigset_t *oldset, struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
/* Are we from a system call? */
|
|
||||||
if (syscall_get_nr(current, regs) >= 0) {
|
|
||||||
/* If so, check system call restarting.. */
|
|
||||||
switch (syscall_get_error(current, regs)) {
|
|
||||||
case -ERESTART_RESTARTBLOCK:
|
|
||||||
case -ERESTARTNOHAND:
|
|
||||||
regs->ax = -EINTR;
|
|
||||||
break;
|
|
||||||
|
|
||||||
case -ERESTARTSYS:
|
|
||||||
if (!(ka->sa.sa_flags & SA_RESTART)) {
|
|
||||||
regs->ax = -EINTR;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
/* fallthrough */
|
|
||||||
case -ERESTARTNOINTR:
|
|
||||||
regs->ax = regs->orig_ax;
|
|
||||||
regs->ip -= 2;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
|
|
||||||
* flag so that register information in the sigcontext is correct.
|
|
||||||
*/
|
|
||||||
if (unlikely(regs->flags & X86_EFLAGS_TF) &&
|
|
||||||
likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
|
|
||||||
regs->flags &= ~X86_EFLAGS_TF;
|
|
||||||
|
|
||||||
ret = setup_rt_frame(sig, ka, info, oldset, regs);
|
|
||||||
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
#ifdef CONFIG_X86_64
|
|
||||||
/*
|
|
||||||
* This has nothing to do with segment registers,
|
|
||||||
* despite the name. This magic affects uaccess.h
|
|
||||||
* macros' behavior. Reset it to the normal setting.
|
|
||||||
*/
|
|
||||||
set_fs(USER_DS);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Clear the direction flag as per the ABI for function entry.
|
|
||||||
*/
|
|
||||||
regs->flags &= ~X86_EFLAGS_DF;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Clear TF when entering the signal handler, but
|
|
||||||
* notify any tracer that was single-stepping it.
|
|
||||||
* The tracer may want to single-step inside the
|
|
||||||
* handler too.
|
|
||||||
*/
|
|
||||||
regs->flags &= ~X86_EFLAGS_TF;
|
|
||||||
|
|
||||||
spin_lock_irq(¤t->sighand->siglock);
|
|
||||||
sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask);
|
|
||||||
if (!(ka->sa.sa_flags & SA_NODEFER))
|
|
||||||
sigaddset(¤t->blocked, sig);
|
|
||||||
recalc_sigpending();
|
|
||||||
spin_unlock_irq(¤t->sighand->siglock);
|
|
||||||
|
|
||||||
tracehook_signal_handler(sig, info, ka, regs,
|
|
||||||
test_thread_flag(TIF_SINGLESTEP));
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define NR_restart_syscall \
|
|
||||||
test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
|
|
||||||
/*
|
|
||||||
* Note that 'init' is a special process: it doesn't get signals it doesn't
|
|
||||||
* want to handle. Thus you cannot kill init even with a SIGKILL even by
|
|
||||||
* mistake.
|
|
||||||
*/
|
|
||||||
static void do_signal(struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
struct k_sigaction ka;
|
|
||||||
siginfo_t info;
|
|
||||||
int signr;
|
|
||||||
sigset_t *oldset;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* We want the common case to go fast, which is why we may in certain
|
|
||||||
* cases get here from kernel mode. Just return without doing anything
|
|
||||||
* if so.
|
|
||||||
* X86_32: vm86 regs switched out by assembly code before reaching
|
|
||||||
* here, so testing against kernel CS suffices.
|
|
||||||
*/
|
|
||||||
if (!user_mode(regs))
|
|
||||||
return;
|
|
||||||
|
|
||||||
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
|
|
||||||
oldset = ¤t->saved_sigmask;
|
|
||||||
else
|
|
||||||
oldset = ¤t->blocked;
|
|
||||||
|
|
||||||
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
|
|
||||||
if (signr > 0) {
|
|
||||||
/*
|
|
||||||
* Re-enable any watchpoints before delivering the
|
|
||||||
* signal to user space. The processor register will
|
|
||||||
* have been cleared if the watchpoint triggered
|
|
||||||
* inside the kernel.
|
|
||||||
*/
|
|
||||||
if (current->thread.debugreg7)
|
|
||||||
set_debugreg(current->thread.debugreg7, 7);
|
|
||||||
|
|
||||||
/* Whee! Actually deliver the signal. */
|
|
||||||
if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
|
|
||||||
/*
|
|
||||||
* A signal was successfully delivered; the saved
|
|
||||||
* sigmask will have been stored in the signal frame,
|
|
||||||
* and will be restored by sigreturn, so we can simply
|
|
||||||
* clear the TS_RESTORE_SIGMASK flag.
|
|
||||||
*/
|
|
||||||
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Did we come from a system call? */
|
|
||||||
if (syscall_get_nr(current, regs) >= 0) {
|
|
||||||
/* Restart the system call - no handlers present */
|
|
||||||
switch (syscall_get_error(current, regs)) {
|
|
||||||
case -ERESTARTNOHAND:
|
|
||||||
case -ERESTARTSYS:
|
|
||||||
case -ERESTARTNOINTR:
|
|
||||||
regs->ax = regs->orig_ax;
|
|
||||||
regs->ip -= 2;
|
|
||||||
break;
|
|
||||||
|
|
||||||
case -ERESTART_RESTARTBLOCK:
|
|
||||||
regs->ax = NR_restart_syscall;
|
|
||||||
regs->ip -= 2;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* If there's no signal to deliver, we just put the saved sigmask
|
|
||||||
* back.
|
|
||||||
*/
|
|
||||||
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
|
|
||||||
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
|
|
||||||
sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* notification of userspace execution resumption
|
|
||||||
* - triggered by the TIF_WORK_MASK flags
|
|
||||||
*/
|
|
||||||
void
|
|
||||||
do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
|
|
||||||
{
|
|
||||||
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
|
|
||||||
/* notify userspace of pending MCEs */
|
|
||||||
if (thread_info_flags & _TIF_MCE_NOTIFY)
|
|
||||||
mce_notify_user();
|
|
||||||
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
|
|
||||||
|
|
||||||
/* deal with pending signal delivery */
|
|
||||||
if (thread_info_flags & _TIF_SIGPENDING)
|
|
||||||
do_signal(regs);
|
|
||||||
|
|
||||||
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
|
|
||||||
clear_thread_flag(TIF_NOTIFY_RESUME);
|
|
||||||
tracehook_notify_resume(regs);
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef CONFIG_X86_32
|
|
||||||
clear_thread_flag(TIF_IRET);
|
|
||||||
#endif /* CONFIG_X86_32 */
|
|
||||||
}
|
|
||||||
|
|
||||||
void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
|
|
||||||
{
|
|
||||||
struct task_struct *me = current;
|
|
||||||
|
|
||||||
if (show_unhandled_signals && printk_ratelimit()) {
|
|
||||||
printk(KERN_INFO
|
|
||||||
"%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
|
|
||||||
me->comm, me->pid, where, frame,
|
|
||||||
regs->ip, regs->sp, regs->orig_ax);
|
|
||||||
print_vma_addr(" in ", regs->ip);
|
|
||||||
printk(KERN_CONT "\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
force_sig(SIGSEGV, me);
|
|
||||||
}
|
|
@@ -80,6 +80,8 @@ unsigned long __init calibrate_cpu(void)
|
|||||||
break;
|
break;
|
||||||
no_ctr_free = (i == 4);
|
no_ctr_free = (i == 4);
|
||||||
if (no_ctr_free) {
|
if (no_ctr_free) {
|
||||||
|
WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
|
||||||
|
"cpu_khz value may be incorrect.\n");
|
||||||
i = 3;
|
i = 3;
|
||||||
rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
|
rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
|
||||||
wrmsrl(MSR_K7_EVNTSEL3, 0);
|
wrmsrl(MSR_K7_EVNTSEL3, 0);
|
||||||
|
@@ -128,7 +128,16 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
|
|||||||
gettimeofday(tv,NULL);
|
gettimeofday(tv,NULL);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Surround the RDTSC by barriers, to make sure it's not
|
||||||
|
* speculated to outside the seqlock critical section and
|
||||||
|
* does not cause time warps:
|
||||||
|
*/
|
||||||
|
rdtsc_barrier();
|
||||||
now = vread();
|
now = vread();
|
||||||
|
rdtsc_barrier();
|
||||||
|
|
||||||
base = __vsyscall_gtod_data.clock.cycle_last;
|
base = __vsyscall_gtod_data.clock.cycle_last;
|
||||||
mask = __vsyscall_gtod_data.clock.mask;
|
mask = __vsyscall_gtod_data.clock.mask;
|
||||||
mult = __vsyscall_gtod_data.clock.mult;
|
mult = __vsyscall_gtod_data.clock.mult;
|
||||||
|
@@ -590,7 +590,8 @@ static void __init lguest_init_IRQ(void)
|
|||||||
* a straightforward 1 to 1 mapping, so force that here. */
|
* a straightforward 1 to 1 mapping, so force that here. */
|
||||||
__get_cpu_var(vector_irq)[vector] = i;
|
__get_cpu_var(vector_irq)[vector] = i;
|
||||||
if (vector != SYSCALL_VECTOR) {
|
if (vector != SYSCALL_VECTOR) {
|
||||||
set_intr_gate(vector, interrupt[vector]);
|
set_intr_gate(vector,
|
||||||
|
interrupt[vector-FIRST_EXTERNAL_VECTOR]);
|
||||||
set_irq_chip_and_handler_name(i, &lguest_irq_controller,
|
set_irq_chip_and_handler_name(i, &lguest_irq_controller,
|
||||||
handle_level_irq,
|
handle_level_irq,
|
||||||
"level");
|
"level");
|
||||||
|
@@ -102,6 +102,8 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
|
|||||||
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
|
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
|
||||||
pud = pud_offset(pgd, 0);
|
pud = pud_offset(pgd, 0);
|
||||||
BUG_ON(pmd_table != pmd_offset(pud, 0));
|
BUG_ON(pmd_table != pmd_offset(pud, 0));
|
||||||
|
|
||||||
|
return pmd_table;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
pud = pud_offset(pgd, 0);
|
pud = pud_offset(pgd, 0);
|
||||||
|
@@ -64,14 +64,6 @@
|
|||||||
name:
|
name:
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define KPROBE_ENTRY(name) \
|
|
||||||
.pushsection .kprobes.text, "ax"; \
|
|
||||||
ENTRY(name)
|
|
||||||
|
|
||||||
#define KPROBE_END(name) \
|
|
||||||
END(name); \
|
|
||||||
.popsection
|
|
||||||
|
|
||||||
#ifndef END
|
#ifndef END
|
||||||
#define END(name) \
|
#define END(name) \
|
||||||
.size name, .-name
|
.size name, .-name
|
||||||
|
Reference in New Issue
Block a user