x86/boot/32: Convert the 32-bit pgtable setup code from assembly to C
The new Xen PVH entry point requires page tables to be set up by the
kernel since it is entered with paging disabled.

Pull the common code out of head_32.S so that mk_early_pgtbl_32() can be
invoked from both the new Xen entry point and the existing startup_32()
code. Convert the resulting common code to C.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: matt@codeblueprint.co.uk
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1481215471-9639-1-git-send-email-boris.ostrovsky@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
committed by Ingo Molnar
parent b4ed1d15b4
commit 1e620f9b23
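Both callers enter with paging disabled, so mk_early_pgtbl_32() runs at the kernel's physical load address and can only translate symbol addresses by plain arithmetic; that is why the function below locally redefines __pa() as a subtraction. A minimal standalone sketch of that relation, not part of the patch, assuming the default 3G/1G split (PAGE_OFFSET = 0xC0000000) rather than the real Kconfig-derived value:

#include <stdio.h>

/* Assumed default 3G/1G split; the real value comes from Kconfig. */
#define PAGE_OFFSET	0xC0000000UL

/*
 * Mirrors the local __pa() macro in mk_early_pgtbl_32(): a pure
 * subtraction that is safe to use before paging is enabled, with no
 * calls into kernel helpers.
 */
#define EARLY_PA(x)	((unsigned long)(x) - PAGE_OFFSET)

int main(void)
{
	/* A symbol linked at virtual 0xC1000000 sits at physical 0x01000000. */
	unsigned long virt = 0xC1000000UL;

	printf("virt 0x%lx -> phys 0x%lx\n", virt, EARLY_PA(virt));
	return 0;
}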
@@ -49,3 +49,65 @@ asmlinkage __visible void __init i386_start_kernel(void)
	start_kernel();
}

/*
 * Initialize page tables. This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base. The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 *
 * In PAE mode initial_page_table is statically defined to contain
 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
 * entries). The identity mapping is handled by pointing two PGD entries
 * to the first kernel PMD. Note the upper half of each PMD or PTE are
 * always zero at this stage.
 */
void __init mk_early_pgtbl_32(void)
{
#ifdef __pa
#undef __pa
#endif
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
	pte_t pte, *ptep;
	int i;
	unsigned long *ptr;
	/* Enough space to fit pagetables for the low memory linear map */
	const unsigned long limit = __pa(_end) +
		(PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT);
#ifdef CONFIG_X86_PAE
	pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd);
#define SET_PL2(pl2, val) { (pl2).pmd = (val); }
#else
	pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table);
#define SET_PL2(pl2, val) { (pl2).pgd = (val); }
#endif

	ptep = (pte_t *)__pa(__brk_base);
	pte.pte = PTE_IDENT_ATTR;

	while ((pte.pte & PTE_PFN_MASK) < limit) {

		SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR);
		*pl2p = pl2;
#ifndef CONFIG_X86_PAE
		/* Kernel PDE entry */
		*(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2;
#endif
		for (i = 0; i < PTRS_PER_PTE; i++) {
			*ptep = pte;
			pte.pte += PAGE_SIZE;
			ptep++;
		}

		pl2p++;
	}

	ptr = (unsigned long *)__pa(&max_pfn_mapped);
	/* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */
	*ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;

	ptr = (unsigned long *)__pa(&_brk_end);
	*ptr = (unsigned long)ptep + PAGE_OFFSET;
}