mm: pgtable: add shortcuts for accessing kernel PMD and PTE
The powerpc 32-bit implementation of pgtable has nice shortcuts for
accessing kernel PMD and PTE for a given virtual address.  Make these
helpers available for all architectures.

[rppt@linux.ibm.com: microblaze: fix page table traversal in setup_rt_frame()]
  Link: http://lkml.kernel.org/r/20200518191511.GD1118872@kernel.org
[akpm@linux-foundation.org: s/pmd_ptr_k/pmd_off_k/ in various powerpc places]

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-9-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e05c7b1f2b (committed by Linus Torvalds)
parent 88107d330d
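
For context, the generic shortcuts this patch introduces can be sketched roughly as follows. This is a minimal sketch expressed with the standard page-table walk accessors; it assumes the helpers live in include/linux/pgtable.h and keep the pmd_off()/pmd_off_k() names used in the hunks below (virt_to_kpte() does not appear in these hunks and is stated here as an assumption about the rest of the series):

/*
 * Sketch of the generic shortcuts: walk pgd -> p4d -> pud -> pmd for a
 * given virtual address, either in a user mm or in the kernel page table.
 */
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	/* Same walk, but starting from the kernel page table (init_mm). */
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	/* Return the kernel PTE for vaddr, or NULL if no PTE page is present. */
	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}

With these in place, the powerpc hunks below become mechanical: each open-coded pmd_ptr()/pmd_ptr_k() call is replaced by the equivalent generic pmd_off()/pmd_off_k().
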
@@ -320,7 +320,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea)
 
 	if (!Hash)
 		return;
-	pmd = pmd_ptr(mm, ea);
+	pmd = pmd_off(mm, ea);
 	if (!pmd_none(*pmd))
 		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
 }
@@ -90,7 +90,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 	if (start >= end)
 		return;
 	end = (end - 1) | ~PAGE_MASK;
-	pmd = pmd_ptr(mm, start);
+	pmd = pmd_off(mm, start);
 	for (;;) {
 		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
 		if (pmd_end > end)
@@ -148,7 +148,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 		return;
 	}
 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
-	pmd = pmd_ptr(mm, vmaddr);
+	pmd = pmd_off(mm, vmaddr);
 	if (!pmd_none(*pmd))
 		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 }
@@ -10,7 +10,7 @@
 static int __init
 kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
 {
-	pmd_t *pmd = pmd_ptr_k(k_start);
+	pmd_t *pmd = pmd_off_k(k_start);
 	unsigned long k_cur, k_next;
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
@@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
 		return ret;
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
@@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
 	kasan_update_early_region(k_start, k_cur, __pte(0));
 
 	for (; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
@@ -33,7 +33,7 @@ int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
 
-	pmd = pmd_ptr_k(k_start);
+	pmd = pmd_off_k(k_start);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
 		pte_t *new;
@@ -69,7 +69,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
 		return -ENOMEM;
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
@@ -86,7 +86,7 @@ kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
 	phys_addr_t pa = __pa(kasan_early_shadow_page);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
-		pmd_t *pmd = pmd_ptr_k(k_cur);
+		pmd_t *pmd = pmd_off_k(k_cur);
 		pte_t *ptep = pte_offset_kernel(pmd, k_cur);
 
 		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
@@ -184,7 +184,7 @@ void __init kasan_early_init(void)
 	unsigned long addr = KASAN_SHADOW_START;
 	unsigned long end = KASAN_SHADOW_END;
 	unsigned long next;
-	pmd_t *pmd = pmd_ptr_k(addr);
+	pmd_t *pmd = pmd_off_k(addr);
 
 	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
 
@@ -103,7 +103,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;
 
-		pmdp = pmd_ptr_k(v);
+		pmdp = pmd_off_k(v);
 		*pmdp++ = __pmd(val);
 		*pmdp++ = __pmd(val);
 		*pmdp++ = __pmd(val);
@@ -118,7 +118,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;
 
-		pmdp = pmd_ptr_k(v);
+		pmdp = pmd_off_k(v);
 		*pmdp = __pmd(val);
 
 		v += LARGE_PAGE_SIZE_4M;
@@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
 static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
 					     pgprot_t prot, int psize, bool new)
 {
-	pmd_t *pmdp = pmd_ptr_k(va);
+	pmd_t *pmdp = pmd_off_k(va);
 	pte_t *ptep;
 
 	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
@@ -40,7 +40,7 @@ notrace void __init early_ioremap_init(void)
 {
 	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
 	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
-	pmd_t *pmdp = pmd_ptr_k(addr);
+	pmd_t *pmdp = pmd_off_k(addr);
 
 	for (; (s32)(FIXADDR_TOP - addr) > 0;
 	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
@@ -78,7 +78,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 	int err = -ENOMEM;
 
 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_ptr_k(va);
+	pd = pmd_off_k(va);
 	/* Use middle 10 bits of VA to index the second-level map */
 	if (likely(slab_is_available()))
 		pg = pte_alloc_kernel(pd, va);