m68k: mm: Restructure Motorola MMU page-table layout

The Motorola 68xxx MMUs on the 040 (and later) have a fixed 7,7,{5,6}
page-table setup, where the last level depends on the selected page
size (8k vs 4k respectively), and head.S selects 4K pages. For the 030
(and earlier) we explicitly program a 7,7,6 setup and 4K pages in %tc.
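
For reference, a minimal sketch of how that split plus a 4K page offset
covers a 32-bit virtual address (constant names here are illustrative,
not the kernel's):

    /* Illustrative only: 7 + 7 + 6 index bits + 12 offset bits. */
    #define M68K_PGD_BITS    7   /* 128 root-table entries     */
    #define M68K_PMD_BITS    7   /* 128 pointer-table entries  */
    #define M68K_PTE_BITS    6   /* 64 page-table entries (4K) */
    #define M68K_PAGE_SHIFT  12  /* 4K page offset             */

    _Static_assert(M68K_PGD_BITS + M68K_PMD_BITS + M68K_PTE_BITS +
                   M68K_PAGE_SHIFT == 32, "7+7+6+12 covers a 32-bit VA");

With 8K pages the last level shrinks to 5 bits instead: 7+7+5+13 == 32.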

However, the current code implements this in a mightily weird way: it
groups 16 of those (6-bit) pte tables into one 4k page so as not to
waste space. The downside is that this forces pmd_t to be a 16-tuple
pointing to consecutive pte tables.

This breaks the generic code, which assumes that READ_ONCE(*pmd) reads
a single word-sized value.
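
A minimal sketch of the difference (type names are illustrative, not
the actual m68k definitions): the old scheme made a pmd entry a 16-word
tuple, which cannot be loaded as one value, while the new scheme makes
it a single word.

    /* Old: one pmd entry bundled 16 consecutive pte-table pointers. */
    typedef struct { unsigned long pmd[16]; } old_pmd_t;  /* 16 words */

    /* New: one pmd entry is a single word, so READ_ONCE(*pmd) is a
     * plain, naturally-sized load, as the generic code expects.     */
    typedef struct { unsigned long pmd; } new_pmd_t;      /* 1 word   */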

Therefore, implement a straightforward 7,7,6 three-level page-table
setup, with the addition (for 020/030) of (partial) large-page support.
For now this increases the memory footprint for pte tables 15-fold.
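
Back-of-the-envelope for the 15-fold figure, assuming 4-byte pte
entries and one full 4K page per pte table in the new scheme (a sketch,
not kernel code):

    #define M68K_PTE_ENTRIES     64    /* 6-bit page-table index   */
    #define M68K_PTE_BYTES       4     /* 32-bit pte entries       */
    #define OLD_PTE_TABLE_BYTES  (M68K_PTE_ENTRIES * M68K_PTE_BYTES) /* 256 */
    #define NEW_PTE_TABLE_BYTES  4096  /* one whole page per table */

    _Static_assert(NEW_PTE_TABLE_BYTES == 16 * OLD_PTE_TABLE_BYTES,
                   "16x the space per table, i.e. a 15-fold increase");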

Tested with ARAnyM/68040 emulation.

Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.711478295@infradead.org
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Authored by: Peter Zijlstra
2020-01-31 13:45:36 +01:00
Committed by: Geert Uytterhoeven
Parent: 5ad272abee
Commit: ef22d8abd8
5 files changed, 39 insertions(+), 56 deletions(-)

arch/m68k/mm/kmap.c

@@ -24,8 +24,6 @@
 #undef DEBUG
 
-#define PTRTREESIZE	(256*1024)
-
 /*
  * For 040/060 we can use the virtual memory area like other architectures,
  * but for 020/030 we want to use early termination page descriptors and we
@@ -50,7 +48,7 @@ static inline void free_io_area(void *addr)
 #else
 
-#define IO_SIZE		(256*1024)
+#define IO_SIZE		PMD_SIZE
 
 static struct vm_struct *iolist;
@@ -81,14 +79,13 @@ static void __free_io_area(void *addr, unsigned long size)
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
-			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;
 
 			if (pmd_type == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = 0;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
+				pmd_clear(pmd_dir);
+				virtaddr += PMD_SIZE;
+				size -= PMD_SIZE;
 				continue;
 			} else if (pmd_type == 0)
 				continue;
 		}
@@ -249,7 +246,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 	while ((long)size > 0) {
 #ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
+		if (!(virtaddr & (PMD_SIZE-1)))
 			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
 #endif
 		pgd_dir = pgd_offset_k(virtaddr);
@@ -263,10 +260,10 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-			physaddr += PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
-			size -= PTRTREESIZE;
+			pmd_val(*pmd_dir) = physaddr;
+			physaddr += PMD_SIZE;
+			virtaddr += PMD_SIZE;
+			size -= PMD_SIZE;
 		} else
 #endif
 		{
@@ -367,13 +364,12 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (CPU_IS_020_OR_030) {
-			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+			unsigned long pmd = pmd_val(*pmd_dir);
 
-			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
-				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
-							 _CACHEMASK040) | cmode;
-				virtaddr += PTRTREESIZE;
-				size -= PTRTREESIZE;
+			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
+				virtaddr += PMD_SIZE;
+				size -= PMD_SIZE;
 				continue;
 			}
 		}