Merge branch 'x86/pti' into x86/mm, to pick up dependencies

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar
2018-03-12 12:10:03 +01:00
69 changed files with 912 additions and 473 deletions

arch/x86/mm/cpu_entry_area.c

@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)
 
 	for_each_possible_cpu(cpu)
 		setup_cpu_entry_area(cpu);
+
+	/*
+	 * This is the last essential update to swapper_pg_dir which needs
+	 * to be synchronized to initial_page_table on 32bit.
+	 */
+	sync_initial_page_table();
 }
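
setup_cpu_entry_areas() is shared between 32-bit and 64-bit, so this unconditional call only works if sync_initial_page_table() compiles away where initial_page_table does not exist. A minimal sketch of the split it relies on, written from memory rather than quoted from the diff (the exact header placement is an assumption):

    #ifdef CONFIG_X86_32
    /* Real implementation; see the arch/x86/mm/init_32.c hunk below. */
    void sync_initial_page_table(void);
    #else
    /* 64-bit has no initial_page_table, so the call is a no-op stub. */
    static inline void sync_initial_page_table(void) { }
    #endif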

arch/x86/mm/fault.c

@@ -1248,10 +1248,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	tsk = current;
 	mm = tsk->mm;
 
-	/*
-	 * Detect and handle instructions that would cause a page fault for
-	 * both a tracked kernel page and a userspace page.
-	 */
 	prefetchw(&mm->mmap_sem);
 
 	if (unlikely(kmmio_fault(regs, address)))

arch/x86/mm/init_32.c

@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
 }
 #endif /* CONFIG_HIGHMEM */
 
+void __init sync_initial_page_table(void)
+{
+	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+
+	/*
+	 * Sync back the low identity map too. It is used for example
+	 * in the 32-bit EFI stub.
+	 */
+	clone_pgd_range(initial_page_table,
+			swapper_pg_dir,
+			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+}
+
 void __init native_pagetable_init(void)
 {
 	unsigned long pfn, va;
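
For reference, the helpers used above are small. clone_pgd_range() is just a memcpy() over PGD entries, and KERNEL_PGD_BOUNDARY/KERNEL_PGD_PTRS mark where the kernel half of the PGD starts. A sketch of their definitions from arch/x86/include/asm/pgtable.h around this kernel version, reproduced from memory and therefore approximate:

    #define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
    #define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

    /* Copy 'count' top-level entries from one page-global directory to
     * another; no TLB maintenance here, that is up to the callers. */
    static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
    {
    	memcpy(dst, src, count * sizeof(pgd_t));
    }

With that, the first call above copies the kernel-half entries of swapper_pg_dir into initial_page_table, and the second copies the low identity-map entries back, clamped by min() so it can never spill into the kernel half.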

arch/x86/mm/mem_encrypt_boot.S

@@ -15,6 +15,7 @@
 #include <asm/page.h>
 #include <asm/processor-flags.h>
 #include <asm/msr-index.h>
+#include <asm/nospec-branch.h>
 
 	.text
 	.code64
@@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute)
 	movq	%rax, %r8		/* Workarea encryption routine */
 	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */
 
+	ANNOTATE_RETPOLINE_SAFE
 	call	*%rax			/* Call the encryption routine */
 
 	pop	%r12
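
ANNOTATE_RETPOLINE_SAFE tells objtool that this bare indirect call is intentional: the target is the locally made workarea copy of the encryption routine, computed during early boot rather than influenced by an attacker, so no retpoline is needed and the annotation suppresses objtool's warning. A sketch of the assembler macro from asm/nospec-branch.h in this era, reproduced from memory and therefore approximate:

    /* Record the address of the next instruction in a discard section
     * that objtool reads, whitelisting this one indirect call/jump. */
    .macro ANNOTATE_RETPOLINE_SAFE
    	.Lannotate_\@:
    	.pushsection .discard.retpoline_safe
    	_ASM_PTR .Lannotate_\@
    	.popsection
    .endm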

arch/x86/mm/pti.c

@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void)
 }
 
 /*
- * Clone the ESPFIX P4D into the user space visinble page table
+ * Clone the ESPFIX P4D into the user space visible page table
  */
 static void __init pti_setup_espfix64(void)
 {