x86/entry/64: Make cpu_entry_area.tss read-only
The TSS is a fairly juicy target for exploits, and, now that the TSS
is in the cpu_entry_area, it's no longer protected by kASLR. Make it
read-only on x86_64.

On x86_32, it can't be RO because it's written by the CPU during task
switches, and we use a task gate for double faults. I'd also be
nervous about errata if we tried to make it RO even on configurations
without double fault handling.

[ tglx: AMD confirmed that there is no problem on 64-bit with TSS RO.
  So it's probably safe to assume that it's a non-issue, though Intel
  might have been creative in that area. Still waiting for
  confirmation. ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bpetkov@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150606.733700132@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit c482feefe1
parent 0f9a48100f
committed by Ingo Molnar
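
Note for context: after this change the kernel keeps writing the TSS through
the renamed per-CPU variable cpu_tss_rw, while the hardware-visible copy in
the cpu_entry_area is an alias of the same physical pages, mapped read-only
on 64-bit. A minimal sketch of that mapping step, assuming the
set_percpu_fixmap_pages()/get_cpu_entry_area_index() helpers from this series
(the exact call site in arch/x86/kernel/cpu/common.c may differ):

#ifdef CONFIG_X86_32
	/* 32-bit CPUs write the TSS on task switches: keep it writable. */
	pgprot_t tss_prot = PAGE_KERNEL;
#else
	/* 64-bit hardware never writes the TSS, so the alias can be RO. */
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#endif

	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
				&per_cpu(cpu_tss_rw, cpu),
				sizeof(struct tss_struct) / PAGE_SIZE,
				tss_prot);

Because both virtual addresses alias the same physical pages, writes through
cpu_tss_rw are immediately visible through the read-only mapping; no explicit
synchronization is needed.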
arch/x86/include/asm/processor.h

@@ -340,13 +340,11 @@ struct SYSENTER_stack {
 	unsigned long		words[64];
 };
 
-struct tss_struct {
-	/*
-	 * Space for the temporary SYSENTER stack, used for SYSENTER
-	 * and the entry trampoline as well.
-	 */
-	struct SYSENTER_stack	SYSENTER_stack;
-
+struct SYSENTER_stack_page {
+	struct SYSENTER_stack stack;
+} __aligned(PAGE_SIZE);
+
+struct tss_struct {
 	/*
 	 * The fixed hardware portion.  This must not cross a page boundary
 	 * at risk of violating the SDM's advice and potentially triggering
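
Splitting the SYSENTER stack into its own page-aligned struct keeps the
hardware portion of tss_struct checkable against the SDM's advice about page
crossings at build time. A hedged sketch of such a check (this series adds
something along these lines in setup_cpu_entry_area()):

	/*
	 * Fail the build if the first and last bytes of the hardware TSS
	 * (x86_tss) land on different pages: the XOR is nonzero in the
	 * page-number bits iff the two offsets are on different pages.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);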
@@ -363,7 +361,7 @@ struct tss_struct {
 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
 } __aligned(PAGE_SIZE);
 
-DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
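
With cpu_tss renamed to cpu_tss_rw, the hardware no longer sees the per-CPU
copy at all: the TSS descriptor in the GDT is pointed at the cpu_entry_area
alias instead. A hedged sketch of the cpu_init() side (helper names as used
in this series):

	/*
	 * ltr only sets the busy bit in the GDT descriptor, not in the
	 * TSS itself, so loading the task register works fine even with
	 * a read-only TSS mapping.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();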
@@ -378,7 +376,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
 #else
-#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
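
The new comment is worth unpacking: this_cpu_read()/this_cpu_write() compile
to %gs-relative accesses into the per-CPU area, and the cpu_entry_area alias
lives at a fixed virtual address outside that segment, so the percpu
accessors can only reach the RW copy. Illustrative only, contrasting the two
access paths (names as in this series):

	/* Percpu accessor: %gs-relative, reaches only the RW copy. */
	unsigned long top = this_cpu_read(cpu_tss_rw.x86_tss.sp1);

	/* The RO alias must be reached via its fixed address instead. */
	struct tss_struct *ro = &get_cpu_entry_area(smp_processor_id())->tss;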
@@ -538,7 +537,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-	this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
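
Usage is unchanged for callers: context-switch code keeps going through the
(possibly paravirtualized) load_sp0() wrapper, which on native hardware ends
up in the this_cpu_write() above; the CPU then picks up the new sp0 through
the read-only alias on the next ring transition. A hedged example of a
typical call (task_top_of_stack() as in this kernel era):

	/* On context switch: aim the entry stack at the incoming task. */
	load_sp0(task_top_of_stack(next_p));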