x86: implement x86_32 stack protector
Impact: stack protector for x86_32

Implement stack protector for x86_32. GDT entry 28 is used for it. It's set to point to stack_canary-20 and has a length of 24 bytes. CONFIG_CC_STACKPROTECTOR turns off CONFIG_X86_32_LAZY_GS and sets %gs to the stack canary segment on entry. As %gs is otherwise unused by the kernel, the canary can be anywhere; it's defined as a percpu variable.

x86_32 exception handlers take the register frame on the stack directly as struct pt_regs. With -fstack-protector turned on, gcc copies the whole structure after the stack canary and (of course) doesn't copy it back on return, thus losing all changes. For now, -fno-stack-protector is added to all files which contain those functions. We definitely need something better.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
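The -20/24-byte numbers follow from the code gcc emits: i386 -fstack-protector hard-codes the canary address as %gs:20, so basing the new segment at stack_canary-20 makes that fixed offset land on the percpu variable, and 20 bytes of offset plus the 4-byte canary gives the 24-byte limit. A rough illustration of the compiler-generated check (not part of the patch; the frame offset shown is arbitrary):

/*
 * Illustration only: with -fstack-protector, i386 gcc brackets each
 * protected function roughly like this:
 *
 *	movl	%gs:20, %eax		# prologue: copy canary into the frame
 *	movl	%eax, -4(%ebp)
 *	...
 *	movl	-4(%ebp), %eax		# epilogue: recheck it
 *	xorl	%gs:20, %eax
 *	jne	<call __stack_chk_fail>
 *
 * With GDT entry 28 based at &stack_canary - 20, the %gs:20 access hits
 * the percpu stack_canary defined later in the diff.
 */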
@@ -39,6 +39,7 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/hypervisor.h>
+#include <asm/stackprotector.h>
 
 #include "cpu.h"
 
@@ -122,6 +123,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
 	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+	GDT_STACK_CANARY_INIT
 #endif
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
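GDT_STACK_CANARY_INIT comes from the new <asm/stackprotector.h>: it seeds entry 28 with the 24-byte segment described above, and each CPU later rewrites the base to point at its own canary. A minimal sketch of that per-CPU fixup, assuming the desc_struct base0/base1/base2 bitfields from <asm/desc_defs.h> and GDT_ENTRY_STACK_CANARY as the name for entry 28; the helper name and the exact way the patch writes the descriptor are assumptions:

#include <asm/desc.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(unsigned long, stack_canary);

/* Hypothetical sketch: re-base GDT entry 28 at this CPU's canary - 20. */
static inline void setup_stack_canary_segment(int cpu)
{
	unsigned long base = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	struct desc_struct desc = gdt[GDT_ENTRY_STACK_CANARY];

	/* keep the 24-byte limit and access bits, rewrite only the base */
	desc.base0 = base & 0xffff;
	desc.base1 = (base >> 16) & 0xff;
	desc.base2 = (base >> 24) & 0xff;
	gdt[GDT_ENTRY_STACK_CANARY] = desc;
}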
@@ -261,6 +263,7 @@ void load_percpu_segment(int cpu)
 	loadsegment(gs, 0);
 	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
+	load_stack_canary_segment();
 }
 
 /* Current gdt points %fs at the "master" per-cpu area: after this,
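load_stack_canary_segment() is the other new <asm/stackprotector.h> hook visible in this hunk. A plausible shape, written as an assumption rather than a quote from the patch: after the per-cpu GDT is switched, x86_32 must reload %gs so it refers to the canary segment in the new GDT, while x86_64 keeps the canary reachable through MSR_GS_BASE and needs nothing here.

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
	/* re-point %gs at the stack canary segment of the freshly loaded GDT */
	asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
#endif
}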
@@ -946,16 +949,21 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else
+#else /* x86_64 */
 
-/* Make sure %fs is initialized properly in idle threads */
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
+
+/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
+	regs->gs = __KERNEL_STACK_CANARY;
 	return regs;
 }
-#endif
+#endif /* x86_64 */
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
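With the percpu stack_canary in place, it still needs a value before the first protected function runs. A rough sketch of the kind of boot-time initialization this series relies on (boot_init_stack_canary() in <asm/stackprotector.h>); the entropy mixing and the way the percpu copy is written below are assumptions, not quotes from the patch:

#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <asm/msr.h>

static __always_inline void boot_init_stack_canary(void)
{
	u64 canary;
	u64 tsc;

	/*
	 * This runs before the random pool is fully seeded, so fold in
	 * the TSC for a little extra per-boot variation.
	 */
	get_random_bytes(&canary, sizeof(canary));
	rdtscll(tsc);
	canary += tsc + (tsc << 32UL);

	current->stack_canary = canary;
	/* x86_32: the canary lives in the plain percpu variable */
	per_cpu(stack_canary, smp_processor_id()) = canary;
}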
@@ -1120,9 +1128,6 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear %gs. */
-	asm volatile ("mov %0, %%gs" : : "r" (0));
-
 	/* Clear all 6 debug registers: */
 	set_debugreg(0, 0);
 	set_debugreg(0, 1);
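The deletion above follows from the commit message: %gs is no longer a scratch segment that cpu_init() may zero, it is set to the stack canary segment on entry and re-established by load_stack_canary_segment(), so clearing it here would break every later canary check. Purely as an illustration (hypothetical helper, i386-only inline asm), the kind of access that would start failing:

/* Hypothetical, i386-only: read the canary exactly the way gcc does. */
static inline unsigned long read_gs_canary(void)
{
	unsigned long val;

	asm ("movl %%gs:20, %0" : "=r" (val));
	return val;
}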