Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 pti fixes from Thomas Gleixner:
 "A set of updates for the x86/pti related code:

   - Preserve r8-r11 in int $0x80. r8-r11 need to be preserved, but the
     int $0x80 entry code removed that quite some time ago. Make it
     correct again.

   - A set of fixes for the Global Bit work which went into 4.17 and
     caused a bunch of interesting regressions:

      - Triggering a BUG in the page attribute code due to a missing
        check for early boot stage

      - Warnings in the page attribute code about holes in the kernel
        text mapping which are caused by the freeing of the init code.
        Handle such holes gracefully.

      - Reduce the amount of kernel memory which is set global to the
        actual text and do not incidentally overlap with data.

      - Disable the global bit when RANDSTRUCT is enabled as it
        partially defeats the hardening.

      - Make the page protection setup correct for vma->page_prot
        population again. The adjustment of the protections fell
        through the crack during the Global bit rework and triggers
        warnings on machines which do not support certain features,
        e.g. NX"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/entry/64/compat: Preserve r8-r11 in int $0x80
  x86/pti: Filter at vma->vm_page_prot population
  x86/pti: Disallow global kernel text with RANDSTRUCT
  x86/pti: Reduce amount of kernel text allowed to be Global
  x86/pti: Fix boot warning from Global-bit setting
  x86/pti: Fix boot problems from Global-bit setting
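
For context on the first item, here is a minimal standalone sketch (not part of this commit and not the kselftest changed below) of what "preserve r8-r11 in int $0x80" means in practice: a 64-bit program puts known patterns into r8-r11, issues getpid through the 32-bit int $0x80 ABI, and checks that the patterns survive. It assumes an x86-64 kernel with CONFIG_IA32_EMULATION so that int $0x80 is reachable from 64-bit code; the constant 20 is __NR_getpid in the 32-bit syscall table. On kernels without the fix, r8-r11 read back as zero.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Put recognizable 64-bit patterns into r8-r11. */
	register uint64_t r8  asm("r8")  = 0x1111111111111111ULL;
	register uint64_t r9  asm("r9")  = 0x2222222222222222ULL;
	register uint64_t r10 asm("r10") = 0x3333333333333333ULL;
	register uint64_t r11 asm("r11") = 0x4444444444444444ULL;

	/* getpid via the 32-bit ABI: eax = __NR_getpid (20), no arguments. */
	asm volatile("movl $20, %%eax\n\t"
		     "int  $0x80"
		     : "+r" (r8), "+r" (r9), "+r" (r10), "+r" (r11)
		     :
		     : "rax", "memory", "cc");

	/* A fixed kernel prints the original patterns; an unfixed one prints zeroes. */
	printf("r8=%016llx r9=%016llx r10=%016llx r11=%016llx\n",
	       (unsigned long long)r8, (unsigned long long)r9,
	       (unsigned long long)r10, (unsigned long long)r11);
	return 0;
}

The kselftest hunks below implement a more thorough version of the same idea, covering r8-r15.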
@@ -100,12 +100,19 @@ asm (
 	"	shl	$32, %r8\n"
 	"	orq	$0x7f7f7f7f, %r8\n"
 	"	movq	%r8, %r9\n"
-	"	movq	%r8, %r10\n"
-	"	movq	%r8, %r11\n"
-	"	movq	%r8, %r12\n"
-	"	movq	%r8, %r13\n"
-	"	movq	%r8, %r14\n"
-	"	movq	%r8, %r15\n"
+	"	incq	%r9\n"
+	"	movq	%r9, %r10\n"
+	"	incq	%r10\n"
+	"	movq	%r10, %r11\n"
+	"	incq	%r11\n"
+	"	movq	%r11, %r12\n"
+	"	incq	%r12\n"
+	"	movq	%r12, %r13\n"
+	"	incq	%r13\n"
+	"	movq	%r13, %r14\n"
+	"	incq	%r14\n"
+	"	movq	%r14, %r15\n"
+	"	incq	%r15\n"
 	"	ret\n"
 	"	.code32\n"
 	"	.popsection\n"
@@ -128,12 +135,13 @@ int check_regs64(void)
 	int err = 0;
 	int num = 8;
 	uint64_t *r64 = &regs64.r8;
+	uint64_t expected = 0x7f7f7f7f7f7f7f7fULL;
 
 	if (!kernel_is_64bit)
 		return 0;
 
 	do {
-		if (*r64 == 0x7f7f7f7f7f7f7f7fULL)
+		if (*r64 == expected++)
 			continue;	/* register did not change */
 		if (syscall_addr != (long)&int80) {
 			/*
@@ -147,18 +155,17 @@ int check_regs64(void)
 				continue;
 			}
 		} else {
-			/* INT80 syscall entrypoint can be used by
+			/*
+			 * INT80 syscall entrypoint can be used by
 			 * 64-bit programs too, unlike SYSCALL/SYSENTER.
 			 * Therefore it must preserve R12+
 			 * (they are callee-saved registers in 64-bit C ABI).
 			 *
-			 * This was probably historically not intended,
-			 * but R8..11 are clobbered (cleared to 0).
-			 * IOW: they are the only registers which aren't
-			 * preserved across INT80 syscall.
+			 * Starting in Linux 4.17 (and any kernel that
+			 * backports the change), R8..11 are preserved.
+			 * Historically (and probably unintentionally), they
+			 * were clobbered or zeroed.
 			 */
-			if (*r64 == 0 && num <= 11)
-				continue;
 		}
 		printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
 		err++;
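
Worked out for reference (the 0x7f7f7f7f7f7f7f7fULL starting point is taken from check_regs64() above; the code that loads r8 before the shl/orq lines is outside these hunks), the new fill sequence leaves the registers holding the series that the expected++ comparison walks through:

  r8  = 0x7f7f7f7f7f7f7f7f
  r9  = 0x7f7f7f7f7f7f7f80
  r10 = 0x7f7f7f7f7f7f7f81
  r11 = 0x7f7f7f7f7f7f7f82
  r12 = 0x7f7f7f7f7f7f7f83
  r13 = 0x7f7f7f7f7f7f7f84
  r14 = 0x7f7f7f7f7f7f7f85
  r15 = 0x7f7f7f7f7f7f7f86

Because every register now holds a distinct value, the single expected++ check also catches registers whose contents were swapped with each other, which the old identical 0x7f7f7f7f7f7f7f7f fill could not distinguish.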