Merge branch 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/smap support from Ingo Molnar:
 "This adds support for the SMAP (Supervisor Mode Access Prevention) CPU
  feature on Intel CPUs: a hardware feature that prevents unintended
  user-space data access from kernel privileged code.  It's turned on
  automatically when possible.

  This, in combination with SMEP, makes it even harder to exploit kernel
  bugs such as NULL pointer dereferences."

Fix up trivial conflict in arch/x86/kernel/entry_64.S due to newly added
includes right next to each other.

* 'x86-smap-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, smep, smap: Make the switching functions one-way
  x86, suspend: On wakeup always initialize cr4 and EFER
  x86-32: Start out eflags and cr4 clean
  x86, smap: Do not abuse the [f][x]rstor_checking() functions for user space
  x86-32, smap: Add STAC/CLAC instructions to 32-bit kernel entry
  x86, smap: Reduce the SMAP overhead for signal handling
  x86, smap: A page fault due to SMAP is an oops
  x86, smap: Turn on Supervisor Mode Access Prevention
  x86, smap: Add STAC and CLAC instructions to control user space access
  x86, uaccess: Merge prototypes for clear_user/__clear_user
  x86, smap: Add a header file with macros for STAC/CLAC
  x86, alternative: Add header guards to <asm/alternative-asm.h>
  x86, alternative: Use .pushsection/.popsection
  x86, smap: Add CR4 bit for SMAP
  x86-32, mm: The WP test should be done on a kernel page
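Before the per-file diffs, a quick illustration of the pattern this series enforces (a hedged sketch, not code from this merge): with CR4.SMAP set, any kernel-mode load or store to a user-space address faults unless EFLAGS.AC is set, so code that legitimately needs to touch user memory brackets the access with STAC/CLAC. The stac()/clac() helpers come from the new <asm/smap.h>; the wrapper name below is made up for illustration and omits the exception-table fault handling that real uaccess primitives have.

/*
 * Illustrative sketch only -- not code from this merge.  Shows the
 * STAC/CLAC window that SMAP requires around a deliberate kernel
 * access to user memory (no fault handling here).
 */
static inline unsigned long read_user_word_sketch(const unsigned long __user *src)
{
	unsigned long val;

	stac();		/* EFLAGS.AC = 1: user-space access allowed   */
	val = *(const unsigned long __force *)src;	/* the only user-space touch */
	clac();		/* EFLAGS.AC = 0: user-space access faults again */

	return val;
}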
arch/x86/kernel/acpi/sleep.c

@@ -43,17 +43,22 @@ int acpi_suspend_lowlevel(void)
 	header->video_mode = saved_video_mode;
 
+	header->pmode_behavior = 0;
+
 #ifndef CONFIG_64BIT
 	store_gdt((struct desc_ptr *)&header->pmode_gdt);
 
-	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
-		       &header->pmode_efer_high))
-		header->pmode_efer_low = header->pmode_efer_high = 0;
+	if (!rdmsr_safe(MSR_EFER,
+			&header->pmode_efer_low,
+			&header->pmode_efer_high))
+		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
 #endif /* !CONFIG_64BIT */
 
 	header->pmode_cr0 = read_cr0();
-	header->pmode_cr4 = read_cr4_safe();
-	header->pmode_behavior = 0;
+	if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
+		header->pmode_cr4 = read_cr4();
+		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
+	}
 	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
 			&header->pmode_misc_en_low,
 			&header->pmode_misc_en_high))
arch/x86/kernel/cpu/common.c

@@ -259,23 +259,36 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif
 
-static int disable_smep __cpuinitdata;
 static __init int setup_disable_smep(char *arg)
 {
-	disable_smep = 1;
+	setup_clear_cpu_cap(X86_FEATURE_SMEP);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
 
-static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
+static __always_inline void setup_smep(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_SMEP)) {
-		if (unlikely(disable_smep)) {
-			setup_clear_cpu_cap(X86_FEATURE_SMEP);
-			clear_in_cr4(X86_CR4_SMEP);
-		} else
-			set_in_cr4(X86_CR4_SMEP);
-	}
+	if (cpu_has(c, X86_FEATURE_SMEP))
+		set_in_cr4(X86_CR4_SMEP);
 }
 
+static __init int setup_disable_smap(char *arg)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SMAP);
+	return 1;
+}
+__setup("nosmap", setup_disable_smap);
+
+static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+{
+	unsigned long eflags;
+
+	/* This should have been cleared long ago */
+	raw_local_save_flags(eflags);
+	BUG_ON(eflags & X86_EFLAGS_AC);
+
+	if (cpu_has(c, X86_FEATURE_SMAP))
+		set_in_cr4(X86_CR4_SMAP);
+}
+
 /*

@@ -712,8 +725,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	c->cpu_index = 0;
 	filter_cpuid_features(c, false);
 
-	setup_smep(c);
-
 	if (this_cpu->c_bsp_init)
 		this_cpu->c_bsp_init(c);
 }

@@ -798,8 +809,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 		c->phys_proc_id = c->initial_apicid;
 	}
 
-	setup_smep(c);
-
 	get_model_name(c); /* Default name */
 
 	detect_nopl(c);

@@ -864,6 +873,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
 
+	/* Set up SMEP/SMAP */
+	setup_smep(c);
+	setup_smap(c);
+
 	/*
 	 * The vendor-specific functions might have changed features.
 	 * Now we do "generic changes."

@@ -1114,7 +1127,8 @@ void syscall_init(void)
 	/* Flags to clear on syscall */
 	wrmsrl(MSR_SYSCALL_MASK,
-	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
 }
 
 /*
arch/x86/kernel/entry_32.S

@@ -57,6 +57,7 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>

@@ -407,7 +408,9 @@ sysenter_past_esp:
 	 */
 	cmpl $__PAGE_OFFSET-3,%ebp
 	jae syscall_fault
+	ASM_STAC
 1:	movl (%ebp),%ebp
+	ASM_CLAC
 	movl %ebp,PT_EBP(%esp)
 	_ASM_EXTABLE(1b,syscall_fault)

@@ -488,6 +491,7 @@ ENDPROC(ia32_sysenter_target)
 	# system call handler stub
 ENTRY(system_call)
 	RING0_INT_FRAME			# can't unwind into user space anyway
+	ASM_CLAC
 	pushl_cfi %eax			# save orig_eax
 	SAVE_ALL
 	GET_THREAD_INFO(%ebp)

@@ -670,6 +674,7 @@ END(syscall_exit_work)
 
 	RING0_INT_FRAME			# can't unwind into user space anyway
 syscall_fault:
+	ASM_CLAC
 	GET_THREAD_INFO(%ebp)
 	movl $-EFAULT,PT_EAX(%esp)
 	jmp resume_userspace

@@ -825,6 +830,7 @@ END(interrupt)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
 	SAVE_ALL
 	TRACE_IRQS_OFF

@@ -841,6 +847,7 @@ ENDPROC(common_interrupt)
 #define BUILD_INTERRUPT3(name, nr, fn)	\
 ENTRY(name)				\
 	RING0_INT_FRAME;		\
+	ASM_CLAC;			\
 	pushl_cfi $~(nr);		\
 	SAVE_ALL;			\
 	TRACE_IRQS_OFF			\

@@ -857,6 +864,7 @@ ENDPROC(name)
 
 ENTRY(coprocessor_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_coprocessor_error
 	jmp error_code

@@ -865,6 +873,7 @@ END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 #ifdef CONFIG_X86_INVD_BUG
 	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */

@@ -886,6 +895,7 @@ END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $-1			# mark this as an int
 	pushl_cfi $do_device_not_available
 	jmp error_code

@@ -906,6 +916,7 @@ END(native_irq_enable_sysexit)
 
 ENTRY(overflow)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_overflow
 	jmp error_code

@@ -914,6 +925,7 @@ END(overflow)
 
 ENTRY(bounds)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_bounds
 	jmp error_code

@@ -922,6 +934,7 @@ END(bounds)
 
 ENTRY(invalid_op)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_invalid_op
 	jmp error_code

@@ -930,6 +943,7 @@ END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_coprocessor_segment_overrun
 	jmp error_code

@@ -938,6 +952,7 @@ END(coprocessor_segment_overrun)
 
 ENTRY(invalid_TSS)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_invalid_TSS
 	jmp error_code
 	CFI_ENDPROC

@@ -945,6 +960,7 @@ END(invalid_TSS)
 
 ENTRY(segment_not_present)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_segment_not_present
 	jmp error_code
 	CFI_ENDPROC

@@ -952,6 +968,7 @@ END(segment_not_present)
 
 ENTRY(stack_segment)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_stack_segment
 	jmp error_code
 	CFI_ENDPROC

@@ -959,6 +976,7 @@ END(stack_segment)
 
 ENTRY(alignment_check)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_alignment_check
 	jmp error_code
 	CFI_ENDPROC

@@ -966,6 +984,7 @@ END(alignment_check)
 
 ENTRY(divide_error)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0			# no error code
 	pushl_cfi $do_divide_error
 	jmp error_code

@@ -975,6 +994,7 @@ END(divide_error)
 #ifdef CONFIG_X86_MCE
 ENTRY(machine_check)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi machine_check_vector
 	jmp error_code

@@ -984,6 +1004,7 @@ END(machine_check)
 
 ENTRY(spurious_interrupt_bug)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $0
 	pushl_cfi $do_spurious_interrupt_bug
 	jmp error_code

@@ -1273,6 +1294,7 @@ return_to_handler:
 
 ENTRY(page_fault)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_page_fault
 	ALIGN
 error_code:

@@ -1345,6 +1367,7 @@ END(page_fault)
 
 ENTRY(debug)
 	RING0_INT_FRAME
+	ASM_CLAC
 	cmpl $ia32_sysenter_target,(%esp)
 	jne debug_stack_correct
 	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn

@@ -1369,6 +1392,7 @@ END(debug)
 	 */
 ENTRY(nmi)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi %eax
 	movl %ss, %eax
 	cmpw $__ESPFIX_SS, %ax

@@ -1439,6 +1463,7 @@ END(nmi)
 
 ENTRY(int3)
 	RING0_INT_FRAME
+	ASM_CLAC
 	pushl_cfi $-1			# mark this as an int
 	SAVE_ALL
 	TRACE_IRQS_OFF

@@ -1459,6 +1484,7 @@ END(general_protection)
 #ifdef CONFIG_KVM_GUEST
 ENTRY(async_page_fault)
 	RING0_EC_FRAME
+	ASM_CLAC
 	pushl_cfi $do_async_page_fault
 	jmp error_code
 	CFI_ENDPROC
arch/x86/kernel/entry_64.S

@@ -57,6 +57,7 @@
 #include <asm/percpu.h>
 #include <asm/asm.h>
 #include <asm/rcu.h>
+#include <asm/smap.h>
 #include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */

@@ -568,7 +569,8 @@ END(ret_from_fork)
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
-* stack pointer.
+* stack pointer.  However, it does mask the flags register for us, so
+* CLD and CLAC are not needed.
 */
 
 /*

@@ -987,6 +989,7 @@ END(interrupt)
 	 */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ

@@ -1126,6 +1129,7 @@ END(common_interrupt)
 */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	pushq_cfi $~(\num)
 .Lcommon_\sym:

@@ -1180,6 +1184,7 @@ apicinterrupt IRQ_WORK_VECTOR \
 */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */

@@ -1197,6 +1202,7 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */

@@ -1215,6 +1221,7 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */

@@ -1234,6 +1241,7 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp

@@ -1252,6 +1260,7 @@ END(\sym)
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
arch/x86/kernel/head_32.S

@@ -287,27 +287,28 @@ ENTRY(startup_32_smp)
 	leal -__PAGE_OFFSET(%ecx),%esp
 
 default_entry:
 
/*
 * New page tables may be in 4Mbyte page mode and may
 * be using the global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!
- * So we do not try to touch it unless we really have
- * some bits in it to set.  This won't work if the BSP
- * implements cr4 but this AP does not -- very unlikely
- * but be warned!  The same applies to the pse feature
- * if not equally supported. --macro
- *
- * NOTE! We have to correct for the fact that we're
- * not yet offset PAGE_OFFSET..
+ * Specifically, cr4 exists if and only if CPUID exists,
+ * which in turn exists if and only if EFLAGS.ID exists.
 */
-#define cr4_bits pa(mmu_cr4_features)
-	movl cr4_bits,%edx
-	andl %edx,%edx
-	jz 6f
-	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
-	orl %edx,%eax
+	movl $X86_EFLAGS_ID,%ecx
+	pushl %ecx
+	popfl
+	pushfl
+	popl %eax
+	pushl $0
+	popfl
+	pushfl
+	popl %edx
+	xorl %edx,%eax
+	testl %ecx,%eax
+	jz 6f			# No ID flag = no CPUID = no CR4
+
+	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
 	testb $X86_CR4_PAE, %al		# check if PAE is enabled
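An aside on the head_32.S hunk above (illustration only, not part of the diff): the new startup code decides whether CR4 exists by probing EFLAGS.ID, since CR4 is present exactly when CPUID is, and CPUID is present exactly when the ID flag can be toggled. A stand-alone C sketch of the same probe, with a hypothetical helper name and intended to be built as 32-bit (e.g. -m32), might look like this:

#include <stdio.h>

/* Toggle EFLAGS bit 21 (ID); if the change sticks, CPUID -- and
 * therefore CR4 -- exists.  Only interesting on very old 32-bit CPUs. */
static int have_cpuid(void)
{
	unsigned long before, after;

	asm volatile("pushf\n\t"		/* save original flags          */
		     "pushf\n\t"
		     "pop %0\n\t"		/* before = current flags       */
		     "mov %0, %1\n\t"
		     "xor %2, %1\n\t"		/* flip the ID bit              */
		     "push %1\n\t"
		     "popf\n\t"			/* try to write it back         */
		     "pushf\n\t"
		     "pop %1\n\t"		/* after = flags as accepted    */
		     "popf"			/* restore original flags       */
		     : "=&r" (before), "=&r" (after)
		     : "ir" (1UL << 21)
		     : "cc");
	return ((before ^ after) & (1UL << 21)) != 0;
}

int main(void)
{
	printf("CPUID available: %d\n", have_cpuid());
	return 0;
}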
arch/x86/kernel/signal.c

@@ -114,11 +114,12 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 		regs->orig_ax = -1;		/* disable syscall checks */
 
 		get_user_ex(buf, &sc->fpstate);
-		err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
 
 		get_user_ex(*pax, &sc->ax);
 	} get_user_catch(err);
 
+	err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
+
 	return err;
 }

@@ -355,7 +356,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sig, &frame->sig);
 		put_user_ex(&frame->info, &frame->pinfo);
 		put_user_ex(&frame->uc, &frame->puc);
-		err |= copy_siginfo_to_user(&frame->info, info);
 
 		/* Create the ucontext.  */
 		if (cpu_has_xsave)

@@ -367,9 +367,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sas_ss_flags(regs->sp),
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-					regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		/* Set up to return from userspace.  */
 		restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);

@@ -386,6 +383,11 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		 */
 		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
 	} put_user_catch(err);
 
+	err |= copy_siginfo_to_user(&frame->info, info);
+	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+				regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
 	if (err)
 		return -EFAULT;

@@ -434,8 +436,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		put_user_ex(sas_ss_flags(regs->sp),
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		/* Set up to return from userspace.  If provided, use a stub
 		   already in userspace.  */

@@ -448,6 +448,9 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		}
 	} put_user_catch(err);
 
+	err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
 	if (err)
 		return -EFAULT;

@@ -504,9 +507,6 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
 			    &frame->uc.uc_stack.ss_flags);
 		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
 		put_user_ex(0, &frame->uc.uc__pad0);
-		err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-					regs, set->sig[0]);
-		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
 		if (ka->sa.sa_flags & SA_RESTORER) {
 			restorer = ka->sa.sa_restorer;

@@ -518,6 +518,10 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
 		put_user_ex(restorer, &frame->pretcode);
 	} put_user_catch(err);
 
+	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+				regs, set->sig[0]);
+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
 	if (err)
 		return -EFAULT;
arch/x86/kernel/xsave.c

@@ -315,7 +315,7 @@ static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
 		if ((unsigned long)buf % 64 || fx_only) {
 			u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
 			xrstor_state(init_xstate_buf, init_bv);
-			return fxrstor_checking((__force void *) buf);
+			return fxrstor_user(buf);
 		} else {
 			u64 init_bv = pcntxt_mask & ~xbv;
 			if (unlikely(init_bv))

@@ -323,9 +323,9 @@ static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
 			return xrestore_user(buf, xbv);
 		}
 	} else if (use_fxsr()) {
-		return fxrstor_checking((__force void *) buf);
+		return fxrstor_user(buf);
 	} else
-		return frstor_checking((__force void *) buf);
+		return frstor_user(buf);
 }
 
 int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)