Files
android_kernel_xiaomi_sm8450/arch/arm/include/asm/pgtable-nommu.h
Giulio Benetti dbd78abd69 ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation
[ Upstream commit 340a982825f76f1cff0daa605970fe47321b5ee7 ]

On no-MMU SoCs (e.g. i.MXRT), ZERO_PAGE(vaddr) expands to:
```
virt_to_page(0)
```
which in turn expands to:
```
pfn_to_page(virt_to_pfn(0))
```
and virt_to_pfn(0) in turn expands to:
```
        ((((unsigned long)(0) - PAGE_OFFSET) >> PAGE_SHIFT) +
         PHYS_PFN_OFFSET)
```
where PAGE_OFFSET is the DRAM base (0x80000000), PHYS_PFN_OFFSET is that
same base expressed as a page frame number (0x80000000 >> 12), and
PAGE_SHIFT is 12. The unsigned subtraction wraps around, so we end up
16MB (0x01000000) beyond the base of DRAM (0x80000000) instead of at a
real page.
When ZERO_PAGE(0) is then used, for example in bio_add_page(), the page
gets an address that is out of DRAM bounds.
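
To make the wraparound concrete, here is a minimal user-space sketch of the
same arithmetic (an illustration, not kernel code), with uint32_t standing in
for the 32-bit target's unsigned long:
```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative values for a no-MMU SoC with DRAM at 0x80000000. */
#define PAGE_SHIFT	12
#define PAGE_OFFSET	UINT32_C(0x80000000)		/* base of DRAM */
#define PHYS_PFN_OFFSET	(PAGE_OFFSET >> PAGE_SHIFT)	/* first valid PFN */

int main(void)
{
	/* virt_to_pfn(0): the unsigned subtraction 0 - PAGE_OFFSET wraps
	 * around to 0x80000000 instead of faulting. */
	uint32_t pfn = ((UINT32_C(0) - PAGE_OFFSET) >> PAGE_SHIFT)
		       + PHYS_PFN_OFFSET;

	printf("virt_to_pfn(0) = 0x%08x\n", (unsigned)pfn); /* 0x00100000 */
	printf("first DRAM PFN = 0x%08x\n", (unsigned)PHYS_PFN_OFFSET);
	return 0;
}
```
The computed PFN, 0x100000, is twice the first valid PFN (0x80000), so the
struct page that pfn_to_page() derives from it lies well outside the
DRAM-backed mem_map.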
So instead of using a fake virtual page 0, let's allocate a dedicated
zero_page during paging_init() and assign it to a global 'struct page *
empty_zero_page', the same way mmu.c does; this is the same approach used
by m68k in commit dc068f462179, as discussed here[0]. Then let's move the
ZERO_PAGE() definition to the top of pgtable.h so it is shared between
mmu.c and nommu.c, as sketched below.
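
Condensed, the shape of the change looks roughly like this (a sketch based on
the commit description above, not the verbatim upstream diff; error handling
and the surrounding paging_init() code are abbreviated):
```c
/* arch/arm/include/asm/pgtable.h: shared by mmu.c and nommu.c */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

/* arch/arm/mm/nommu.c: back the global with a real, zeroed DRAM page */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

void __init paging_init(const struct machine_desc *mdesc)
{
	void *zero_page;

	/* ... existing MPU/vectors setup ... */

	/* memblock_alloc() returns zeroed memory, so no memset is needed */
	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!zero_page)
		panic("failed to allocate the zero page");

	/* ... bootmem_init() ... */

	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}
```
With a page of real DRAM behind empty_zero_page, ZERO_PAGE(vaddr) now returns
a valid struct page on both MMU and no-MMU configurations.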

[0]: https://lore.kernel.org/linux-m68k/2a462b23-5b8e-bbf4-ec7d-778434a3b9d7@google.com/T/#m1266ceb63ad140743174d6b3070364d3c9a5179b

Signed-off-by: Giulio Benetti <giulio.benetti@benettiengineering.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
2022-12-14 11:31:53 +01:00


```c
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgtable-nommu.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *  Copyright (C) 2004  Hyok S. Choi
 */
#ifndef _ASMARM_PGTABLE_NOMMU_H
#define _ASMARM_PGTABLE_NOMMU_H

#ifndef __ASSEMBLY__

#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Trivial page table functions.
 */
#define pgd_present(pgd)	(1)
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
/* FIXME */
/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PGDIR_SHIFT		21

#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
/* FIXME */

#define PAGE_NONE	__pgprot(0)
#define PAGE_SHARED	__pgprot(0)
#define PAGE_COPY	__pgprot(0)
#define PAGE_READONLY	__pgprot(0)
#define PAGE_KERNEL	__pgprot(0)

#define swapper_pg_dir	((pgd_t *) 0)

typedef pte_t *pte_addr_t;

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)		(prot)
#define pgprot_writecombine(prot)	(prot)
#define pgprot_device(prot)		(prot)

/*
 * These would be in other places but having them here reduces the diffs.
 */
extern unsigned int kobjsize(const void *objp);

/*
 * All 32bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define VMALLOC_START	0UL
#define VMALLOC_END	0xffffffffUL

#define FIRST_USER_ADDRESS	0UL

#else

/*
 * dummy tlb and user structures.
 */
#define v3_tlb_fns	(0)
#define v4_tlb_fns	(0)
#define v4wb_tlb_fns	(0)
#define v4wbi_tlb_fns	(0)
#define v6wbi_tlb_fns	(0)
#define v7wbi_tlb_fns	(0)

#define v3_user_fns	(0)
#define v4_user_fns	(0)
#define v4_mc_user_fns	(0)
#define v4wb_user_fns	(0)
#define v4wt_user_fns	(0)
#define v6_user_fns	(0)
#define xscale_mc_user_fns	(0)

#endif /*__ASSEMBLY__*/

#endif /* _ASMARM_PGTABLE_H */
```