Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
 "Most of the changes relate to Peter Zijlstra's cleanup of ptregs
  handling, in particular the i386 part is now much simplified and
  standardized - no more partial ptregs stack frames via the esp/ss
  oddity. This simplifies ftrace, kprobes, the unwinder, ptrace, kdump
  and kgdb.

  There are also CR4 hardening enhancements by Kees Cook, to make the
  generic platform functions such as native_write_cr4() less useful as
  ROP gadgets that disable SMEP/SMAP. Also protect the WP bit of CR0
  against similar attacks.

  The rest is smaller cleanups/fixes"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/alternatives: Add int3_emulate_call() selftest
  x86/stackframe/32: Allow int3_emulate_push()
  x86/stackframe/32: Provide consistent pt_regs
  x86/stackframe, x86/ftrace: Add pt_regs frame annotations
  x86/stackframe, x86/kprobes: Fix frame pointer annotations
  x86/stackframe: Move ENCODE_FRAME_POINTER to asm/frame.h
  x86/entry/32: Clean up return from interrupt preemption path
  x86/asm: Pin sensitive CR0 bits
  x86/asm: Pin sensitive CR4 bits
  Documentation/x86: Fix path to entry_32.S
  x86/asm: Remove unused TASK_TI_flags from asm-offsets.c
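The CR0/CR4 pinning mentioned above works by recording the security-critical bits once at boot and forcing them back on in the generic write path, so a ROP gadget that reaches native_write_cr4() cannot silently clear SMEP or SMAP. Below is a small, hedged user-space sketch of that idea, not the kernel's actual implementation (which also re-writes the register after detecting tampering to narrow the race window); cr4_shadow and the simplified warning are stand-ins for illustration only.

#include <stdio.h>

/*
 * User-space simulation of CR4 bit pinning (illustrative sketch, not
 * the kernel code).  SMEP is CR4 bit 20, SMAP is CR4 bit 21.
 */
#define X86_CR4_SMEP (1UL << 20)
#define X86_CR4_SMAP (1UL << 21)

static unsigned long cr4_shadow;      /* stand-in for the real CR4 */
static unsigned long cr4_pinned_bits; /* fixed once at "boot" */

static void write_cr4_pinned(unsigned long val)
{
	unsigned long missing = ~val & cr4_pinned_bits;

	if (missing) {
		/* Force the pinned bits back on and complain. */
		val |= missing;
		fprintf(stderr, "CR4 bits went missing: %#lx\n", missing);
	}
	cr4_shadow = val;
}

int main(void)
{
	/* "Boot": enable SMEP/SMAP and pin them. */
	cr4_shadow = cr4_pinned_bits = X86_CR4_SMEP | X86_CR4_SMAP;

	/* A hijacked write that tries to clear them... */
	write_cr4_pinned(0);

	/* ...still leaves the pinned bits set. */
	printf("CR4 = %#lx\n", cr4_shadow);
	return 0;
}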
@@ -67,7 +67,6 @@
# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel restore_all_kernel
#endif

.macro TRACE_IRQS_IRET
@@ -203,9 +202,102 @@
.Lend_\@:
.endm

#define CS_FROM_ENTRY_STACK (1 << 31)
#define CS_FROM_USER_CR3 (1 << 30)
#define CS_FROM_KERNEL (1 << 29)

.macro FIXUP_FRAME
        /*
         * The high bits of the CS dword (__csh) are used for CS_FROM_*.
         * Clear them in case hardware didn't do this for us.
         */
        andl $0x0000ffff, 3*4(%esp)

#ifdef CONFIG_VM86
        testl $X86_EFLAGS_VM, 4*4(%esp)
        jnz .Lfrom_usermode_no_fixup_\@
#endif
        testl $SEGMENT_RPL_MASK, 3*4(%esp)
        jnz .Lfrom_usermode_no_fixup_\@

        orl $CS_FROM_KERNEL, 3*4(%esp)

        /*
         * When we're here from kernel mode, the (exception) stack looks like:
         *
         *  5*4(%esp) - <previous context>
         *  4*4(%esp) - flags
         *  3*4(%esp) - cs
         *  2*4(%esp) - ip
         *  1*4(%esp) - orig_eax
         *  0*4(%esp) - gs / function
         *
         * Let's build a 5 entry IRET frame after that, such that struct pt_regs
         * is complete and in particular regs->sp is correct. This gives us
         * the original 5 entries as gap:
         *
         * 12*4(%esp) - <previous context>
         * 11*4(%esp) - gap / flags
         * 10*4(%esp) - gap / cs
         *  9*4(%esp) - gap / ip
         *  8*4(%esp) - gap / orig_eax
         *  7*4(%esp) - gap / gs / function
         *  6*4(%esp) - ss
         *  5*4(%esp) - sp
         *  4*4(%esp) - flags
         *  3*4(%esp) - cs
         *  2*4(%esp) - ip
         *  1*4(%esp) - orig_eax
         *  0*4(%esp) - gs / function
         */

        pushl %ss               # ss
        pushl %esp              # sp (points at ss)
        addl $6*4, (%esp)       # point sp back at the previous context
        pushl 6*4(%esp)         # flags
        pushl 6*4(%esp)         # cs
        pushl 6*4(%esp)         # ip
        pushl 6*4(%esp)         # orig_eax
        pushl 6*4(%esp)         # gs / function
.Lfrom_usermode_no_fixup_\@:
.endm
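The practical payoff of FIXUP_FRAME is that struct pt_regs is now complete even for kernel-mode entries, so regs->sp can be read directly instead of being reconstructed. The pull message lists ptrace among the simplified users; the sketch below illustrates the before/after idea only - the struct and function names are reduced stand-ins, not the kernel's real pt_regs layout or helpers.

#include <stdio.h>

/*
 * Reduced stand-in for the 32-bit pt_regs; field names and layout are
 * illustrative only.
 */
struct fake_pt_regs {
	unsigned long ip, cs, flags, sp, ss;
};

/*
 * Old scheme: kernel-mode frames stopped short of sp/ss, so the
 * interrupted stack pointer was "where sp would have been stored",
 * i.e. the address of the missing slot.
 */
static unsigned long sp_before_fixup(struct fake_pt_regs *regs)
{
	return (unsigned long)&regs->sp;
}

/* New scheme: FIXUP_FRAME always records a real sp, so just read it. */
static unsigned long sp_after_fixup(struct fake_pt_regs *regs)
{
	return regs->sp;
}

int main(void)
{
	struct fake_pt_regs regs = { .sp = 0xc1000040UL };

	printf("old heuristic: %#lx\n", sp_before_fixup(&regs));
	printf("new, exact:    %#lx\n", sp_after_fixup(&regs));
	return 0;
}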

.macro IRET_FRAME
        testl $CS_FROM_KERNEL, 1*4(%esp)
        jz .Lfinished_frame_\@

        /*
         * Reconstruct the 3 entry IRET frame right after the (modified)
         * regs->sp without lowering %esp in between, such that an NMI in the
         * middle doesn't scribble our stack.
         */
        pushl %eax
        pushl %ecx
        movl 5*4(%esp), %eax    # (modified) regs->sp

        movl 4*4(%esp), %ecx    # flags
        movl %ecx, -4(%eax)

        movl 3*4(%esp), %ecx    # cs
        andl $0x0000ffff, %ecx
        movl %ecx, -8(%eax)

        movl 2*4(%esp), %ecx    # ip
        movl %ecx, -12(%eax)

        movl 1*4(%esp), %ecx    # eax
        movl %ecx, -16(%eax)

        popl %ecx
        lea -16(%eax), %esp
        popl %eax
.Lfinished_frame_\@:
.endm
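IRET_FRAME is the inverse operation on the exit path: it copies ip/cs/flags to just below the (possibly modified) regs->sp and only switches %esp at the very end, so an NMI hitting in the middle never sees a stack pointer below live data. A tiny worked example of the resulting addresses, using a made-up regs->sp value:

#include <stdio.h>

int main(void)
{
	/* Made-up value for the (possibly modified) regs->sp. */
	unsigned long sp = 0xc1000040UL;

	/* IRET_FRAME rebuilds the 3-entry hardware frame just below it: */
	printf("flags at %#lx\n", sp - 1 * 4);   /* movl %ecx, -4(%eax)  */
	printf("cs    at %#lx\n", sp - 2 * 4);   /* movl %ecx, -8(%eax)  */
	printf("ip    at %#lx\n", sp - 3 * 4);   /* movl %ecx, -12(%eax) */

	/*
	 * %esp is only moved once the copies are done (lea -16(%eax), %esp
	 * followed by popl %eax), landing on the new frame with ip on top.
	 */
	printf("final %%esp = %#lx\n", sp - 3 * 4);
	return 0;
}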

.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
        cld
        PUSH_GS
        FIXUP_FRAME
        pushl %fs
        pushl %es
        pushl %ds
@@ -247,22 +339,6 @@
.Lend_\@:
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just clearing the MSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original rbp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
        mov %esp, %ebp
        andl $0x7fffffff, %ebp
#endif
.endm

.macro RESTORE_INT_REGS
        popl %ebx
        popl %ecx
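The encoding is deliberately cheap to reverse: 32-bit kernel stack addresses have their top bit set, so a "frame pointer" with a clear MSB can only be the disguised pt_regs pointer, and restoring the bit recovers it. A hedged sketch of the decode step follows; it is loosely modeled on the frame-pointer unwinder's behaviour, with simplified names and types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative decode of the 32-bit ENCODE_FRAME_POINTER scheme: the
 * macro stores (%esp & 0x7fffffff) in %ebp.  Kernel stack addresses
 * have the top bit set, so a cleared MSB marks "this is really a
 * pt_regs pointer".  Simplified names/types, not the kernel helper.
 */
static bool decode_frame_pointer(uint32_t bp, uint32_t *regs_addr)
{
	if (bp & 0x80000000u)
		return false;            /* ordinary frame pointer */

	*regs_addr = bp | 0x80000000u;   /* undo the encoding */
	return true;
}

int main(void)
{
	uint32_t regs = 0xc2345678u;             /* made-up &pt_regs */
	uint32_t encoded = regs & 0x7fffffffu;   /* what the macro stores */
	uint32_t decoded;

	if (decode_frame_pointer(encoded, &decoded))
		printf("pt_regs at %#x\n", decoded);
	return 0;
}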
@@ -375,9 +451,6 @@
 * switch to it before we do any copying.
 */

#define CS_FROM_ENTRY_STACK (1 << 31)
#define CS_FROM_USER_CR3 (1 << 30)

.macro SWITCH_TO_KERNEL_STACK

        ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
@@ -391,13 +464,6 @@
         * that register for the time this macro runs
         */

        /*
         * The high bits of the CS dword (__csh) are used for
         * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
         * hardware didn't do this for us.
         */
        andl $(0x0000ffff), PT_CS(%esp)

        /* Are we on the entry stack? Bail out if not! */
        movl PER_CPU_VAR(cpu_entry_area), %ecx
        addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
@@ -755,7 +821,7 @@ ret_from_intr:
        andl $SEGMENT_RPL_MASK, %eax
#endif
        cmpl $USER_RPL, %eax
        jb resume_kernel # not returning to v8086 or userspace
        jb restore_all_kernel # not returning to v8086 or userspace

ENTRY(resume_userspace)
        DISABLE_INTERRUPTS(CLBR_ANY)
@@ -765,18 +831,6 @@ ENTRY(resume_userspace)
        jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
        cmpl $0, PER_CPU_VAR(__preempt_count)
        jnz restore_all_kernel
        testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
        jz restore_all_kernel
        call preempt_schedule_irq
        jmp restore_all_kernel
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
@@ -1019,6 +1073,7 @@ restore_all:
        /* Restore user state */
        RESTORE_REGS pop=4 # skip orig_eax/error_code
.Lirq_return:
        IRET_FRAME
        /*
         * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
         * when returning from IPI handler and when returning from
@@ -1027,6 +1082,15 @@ restore_all:
        INTERRUPT_RETURN

restore_all_kernel:
#ifdef CONFIG_PREEMPT
        DISABLE_INTERRUPTS(CLBR_ANY)
        cmpl $0, PER_CPU_VAR(__preempt_count)
        jnz .Lno_preempt
        testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
        jz .Lno_preempt
        call preempt_schedule_irq
.Lno_preempt:
#endif
        TRACE_IRQS_IRET
        PARANOID_EXIT_TO_KERNEL_MODE
        BUG_IF_WRONG_CR3
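The restore_all_kernel block above folds the old resume_kernel logic into the exit path: with interrupts disabled, it reschedules only if the preempt count is zero and the interrupted context had interrupts enabled. A C rendering of that decision, purely illustrative (the real check runs in assembly on the saved EFLAGS):

#include <stdio.h>

/* EFLAGS.IF (interrupt enable flag) is bit 9. */
#define X86_EFLAGS_IF (1UL << 9)

/*
 * Illustrative C rendering of the restore_all_kernel check above; the
 * real decision is made in assembly with interrupts already disabled.
 */
static void maybe_preempt(unsigned long preempt_count,
			  unsigned long saved_eflags)
{
	if (preempt_count != 0)
		return;         /* jnz .Lno_preempt */
	if (!(saved_eflags & X86_EFLAGS_IF))
		return;         /* interrupted context had IRQs off
				   (exception path): jz .Lno_preempt */
	puts("preempt_schedule_irq()");  /* call preempt_schedule_irq */
}

int main(void)
{
	maybe_preempt(0, X86_EFLAGS_IF);  /* eligible: reschedules */
	maybe_preempt(1, X86_EFLAGS_IF);  /* preemption disabled: skipped */
	return 0;
}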
@@ -1384,6 +1448,7 @@ END(page_fault)

common_exception:
        /* the function address is in %gs's slot on the stack */
        FIXUP_FRAME
        pushl %fs
        pushl %es
        pushl %ds