x86/mm: Do not auto-massage page protections
A PTE is constructed from a physical address and a pgprotval_t.
__PAGE_KERNEL, for instance, is a pgprot_t and must be converted into a
pgprotval_t before it can be used to create a PTE. This is done
implicitly within functions like pfn_pte() by massage_pgprot().

However, this makes it very challenging to set bits (and keep them set)
if your bit is being filtered out by massage_pgprot(). This moves the
bit filtering out of pfn_pte() and friends. For users of PAGE_KERNEL*,
filtering is still done automatically inside those macros, but users of
__PAGE_KERNEL* must now do their own filtering.

Note that we also just move pfn_pte/pmd/pud() over to check_pgprot()
instead of massage_pgprot(). This way, we still *look* for unsupported
bits and properly warn about them if we find them. This might happen if
an unfiltered __PAGE_KERNEL* value was passed in, for instance.

- printk format warning fix from: Arnd Bergmann <arnd@arndb.de>
- boot crash fix from: Tom Lendacky <thomas.lendacky@amd.com>
- crash bisected by: Mike Galbraith <efault@gmx.de>

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reported-and-fixed-by: Arnd Bergmann <arnd@arndb.de>
Fixed-by: Tom Lendacky <thomas.lendacky@amd.com>
Bisected-by: Mike Galbraith <efault@gmx.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nadav Amit <namit@vmware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20180406205509.77E1D7F6@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
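As a rough illustration of the filtering the commit message asks of raw
__PAGE_KERNEL* users, a caller now masks the value against
__default_kernel_pte_mask itself before building the PTE. The snippet
below is a minimal sketch of that pattern; the pfn and pte variables are
illustrative and not taken from the patch:

	/* Illustrative sketch only -- not code from this patch. */
	pgprot_t prot = __pgprot(__PAGE_KERNEL_EXEC);
	pte_t pte;

	/* Filter out bits unsupported on this CPU (e.g. NX or GLOBAL): */
	pgprot_val(prot) &= __default_kernel_pte_mask;

	pte = pfn_pte(pfn, prot);

The hunks below apply exactly this pattern in the hibernation text
mapping setup.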
@@ -51,6 +51,12 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 	pmd_t *pmd;
 	pud_t *pud;
 	p4d_t *p4d = NULL;
+	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
+	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);
+
+	/* Filter out unsupported __PAGE_KERNEL* bits: */
+	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
+	pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;
 
 	/*
 	 * The new mapping only has to cover the page containing the image
@@ -81,15 +87,19 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 		return -ENOMEM;
 
 	set_pmd(pmd + pmd_index(restore_jump_address),
-		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
 	set_pud(pud + pud_index(restore_jump_address),
-		__pud(__pa(pmd) | _KERNPG_TABLE));
+		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
 	if (p4d) {
-		set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE));
-		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE));
+		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
+		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
+
+		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
+		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
 	} else {
 		/* No p4d for 4-level paging: point the pgd to the pud page table */
-		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(pud) | _KERNPG_TABLE));
+		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
+		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
 	}
 
 	return 0;
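The check_pgprot() change the commit message mentions is not visible in
the hunks above. As a hedged sketch of its shape (the exact warning text
and the CONFIG_DEBUG_VM guard here are an approximation, not quoted from
the patch), it keeps the old massage_pgprot() masking but warns when
unsupported bits would be silently dropped:

	static inline pgprotval_t check_pgprot(pgprot_t pgprot)
	{
		pgprotval_t massaged_val = massage_pgprot(pgprot);

		/* Warn if unsupported pgprot bits would be silently dropped: */
	#ifdef CONFIG_DEBUG_VM
		WARN_ONCE(pgprot_val(pgprot) != massaged_val,
			  "attempted to set unsupported pgprot: %016llx bits: %016llx supported: %016llx\n",
			  (u64)pgprot_val(pgprot),
			  (u64)pgprot_val(pgprot) & ~massaged_val,
			  (u64)massaged_val);
	#endif

		return massaged_val;
	}

pfn_pte()/pfn_pmd()/pfn_pud() then use this instead of calling
massage_pgprot() directly, so an unfiltered __PAGE_KERNEL* value is
still masked but no longer goes unnoticed.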