m68k: mm: Restructure Motorola MMU page-table layout

The Motorola 68xxx MMUs on the 040 (and later) have a fixed 7,7,{5,6}
page-table layout, where the width of the last level depends on the
selected page size (8k vs 4k respectively); head.S selects 4K pages.
For the 030 (and earlier) we explicitly program a 7,7,6 layout and 4K
pages in %tc.
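
As a concrete illustration of that split, here is a minimal user-space
sketch (plain C, not kernel code; the shift values are simply those
implied by a 7,7,6 layout with 4K pages) that decomposes a 32-bit
virtual address into its three table indices and page offset:

#include <stdio.h>

/* 7,7,6 split of a 32-bit virtual address with 4K pages:
 *   bits 31..25 (7 bits) -> root-table index    (128 entries)
 *   bits 24..18 (7 bits) -> pointer-table index (128 entries)
 *   bits 17..12 (6 bits) -> page-table index    (64 entries)
 *   bits 11..0           -> offset within the 4K page
 */
#define PAGE_SHIFT	12
#define PMD_SHIFT	18	/* PAGE_SHIFT + 6 */
#define PGDIR_SHIFT	25	/* PMD_SHIFT + 7 */

int main(void)
{
	unsigned long va = 0x12345678UL;	/* arbitrary example address */

	printf("pgd index: %lu\n", (va >> PGDIR_SHIFT) & 0x7f);
	printf("pmd index: %lu\n", (va >> PMD_SHIFT) & 0x7f);
	printf("pte index: %lu\n", (va >> PAGE_SHIFT) & 0x3f);
	printf("offset:    %#lx\n", va & 0xfff);
	return 0;
}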

However, the current code implements this in a mightily weird way: it
groups 16 of those (6-bit, i.e. 64-entry) pte tables into a single 4k
page so as not to waste space. The downside is that this forces pmd_t
to be a 16-tuple pointing at consecutive pte tables.

This breaks the generic code, which assumes that READ_ONCE(*pmd) is a
single word-sized load.
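
A minimal user-space model of that clash (the type shapes mirror the
description above; names and values are illustrative, not copied from
the kernel headers):

#include <stdio.h>

/* READ_ONCE() is modelled here as a single volatile load; that is only
 * one machine-word access when the object itself is word sized.
 */
#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

typedef struct { unsigned long pmd[16]; } old_pmd_t;	/* 16-tuple: 64 bytes */
typedef struct { unsigned long pmd; } new_pmd_t;	/* one word per entry */

int main(void)
{
	new_pmd_t entry = { 0x12345000UL };
	unsigned long snap = READ_ONCE(entry.pmd);	/* single word-sized load */

	printf("snapshot %#lx; new pmd_t is %zu bytes, old pmd_t was %zu bytes\n",
	       snap, sizeof(new_pmd_t), sizeof(old_pmd_t));
	return 0;
}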

Therefore, implement a straightforward 7,7,6 three-level page-table
setup, with the addition (for 020/030) of (partial) large-page support.
For now this increases the memory footprint of pte tables 15-fold.
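
The resulting geometry, sketched with illustrative constants derived
from the 7,7,6 split (not an excerpt from the m68k headers):

/* Geometry implied by a straightforward 7,7,6 split with 4K pages. */
#define PTRS_PER_PGD	128	/* 7-bit root-table index */
#define PTRS_PER_PMD	128	/* 7-bit pointer-table index */
#define PTRS_PER_PTE	64	/* 6-bit page-table index */

/* One pte table holds 64 entries * 4 bytes = 256 bytes, yet now occupies
 * a whole 4K page instead of sharing it with 15 other tables; the unused
 * 4096 - 256 = 3840 bytes per table is the 15-fold footprint increase
 * mentioned above.
 */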

Tested with ARAnyM/68040 emulation.

Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.711478295@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>

commit ef22d8abd8 (parent 5ad272abee)
Peter Zijlstra, 2020-01-31 13:45:36 +01:00; committed by Geert Uytterhoeven
5 changed files with 39 additions and 56 deletions

@@ -236,8 +236,6 @@ static pmd_t * __init kernel_ptr_table(void)
 static void __init map_node(int node)
 {
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
 	unsigned long physaddr, virtaddr, size;
 	pgd_t *pgd_dir;
 	p4d_t *p4d_dir;
@@ -255,21 +253,21 @@ static void __init map_node(int node)
 	while (size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
 				virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
 		if (virtaddr && CPU_IS_020_OR_030) {
-			if (!(virtaddr & (ROOTTREESIZE-1)) &&
-			    size >= ROOTTREESIZE) {
+			if (!(virtaddr & (PGDIR_SIZE-1)) &&
+			    size >= PGDIR_SIZE) {
 #ifdef DEBUG
 				printk ("[very early term]");
 #endif
 				pgd_val(*pgd_dir) = physaddr;
-				size -= ROOTTREESIZE;
-				virtaddr += ROOTTREESIZE;
-				physaddr += ROOTTREESIZE;
+				size -= PGDIR_SIZE;
+				virtaddr += PGDIR_SIZE;
+				physaddr += PGDIR_SIZE;
 				continue;
 			}
 		}
@@ -289,8 +287,8 @@ static void __init map_node(int node)
 #ifdef DEBUG
 				printk ("[early term]");
 #endif
-				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-				physaddr += PTRTREESIZE;
+				pmd_val(*pmd_dir) = physaddr;
+				physaddr += PMD_SIZE;
 			} else {
 				int i;
 #ifdef DEBUG
@@ -298,15 +296,15 @@
 #endif
 				zero_pgtable = kernel_ptr_table();
 				pte_dir = (pte_t *)zero_pgtable;
-				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
-					_PAGE_TABLE | _PAGE_ACCESSED;
+				pmd_set(pmd_dir, pte_dir);
 				pte_val(*pte_dir++) = 0;
 				physaddr += PAGE_SIZE;
-				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
 					pte_val(*pte_dir++) = physaddr;
 			}
-			size -= PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
+			size -= PMD_SIZE;
+			virtaddr += PMD_SIZE;
 		} else {
 			if (!pmd_present(*pmd_dir)) {
 #ifdef DEBUG