
Patch series "mm: consolidate definitions of page table accessors", v2.

The low level page table accessors (pXY_index(), pXY_offset()) are
duplicated across all architectures and sometimes more than once.  For
instance, we have 31 definitions of pgd_offset() for 25 supported
architectures.

Most of these definitions are actually identical and typically it boils
down to, e.g.

	static inline unsigned long pmd_index(unsigned long address)
	{
		return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	}

	static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
	{
		return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
	}

These definitions can be shared among 90% of the arches provided
XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined.

For architectures that really need a custom version there is always the
possibility to override the generic version with the usual ifdefs magic
(a sketch of this override pattern follows the tags below).

These patches introduce include/linux/pgtable.h that replaces
include/asm-generic/pgtable.h and add the definitions of the page table
accessors to the new header.

This patch (of 12):

The linux/mm.h header includes <asm/pgtable.h> to allow inlining of the
functions involving page table manipulations, e.g. pte_alloc() and
pmd_alloc().  So there is no point in explicitly including
<asm/pgtable.h> in files that already include <linux/mm.h>.

The include statements in such cases are removed with a simple loop:

	for f in $(git grep -l "include <linux/mm.h>") ; do
		sed -i -e '/include <asm\/pgtable.h>/ d' $f
	done

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org
Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
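
A minimal sketch of the override pattern mentioned above, assuming the
generic accessors in the new include/linux/pgtable.h are wrapped in
#ifndef guards; foo_pmd_lookup() is a made-up helper used purely for
illustration.  An architecture would provide in its <asm/pgtable.h>:

	static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
	{
		/* arch-specific lookup instead of the generic linear indexing */
		return foo_pmd_lookup(pud, address);
	}
	/* tell the generic header not to emit its own definition */
	#define pmd_offset pmd_offset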
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>
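
/*
 * Populate the PTE that maps the stub page at address @proc: allocate
 * the intermediate page table levels as needed and point the PTE at
 * the page backing the kernel virtual address @kernel, marked present
 * and readable.
 */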
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);

	p4d = p4d_alloc(mm, pgd, proc);
	if (!p4d)
		goto out;

	pud = pud_alloc(mm, p4d, proc);
	if (!pud)
		goto out_pud;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out_pud:
	p4d_free(mm, p4d);
 out:
	return -ENOMEM;
}
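
/*
 * Set up the host-side context for a new mm: allocate a zeroed page
 * for the stub's stack, create the backing host process (copied from
 * the current context when there is one, started from scratch
 * otherwise) and initialize its LDT.
 */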
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	stack = get_zeroed_page(GFP_KERNEL);
	if (stack == 0)
		goto out;

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	block_signals_trace();
	if (from_mm)
		to_mm->id.u.pid = copy_context_skas0(stack,
						     from_mm->id.u.pid);
	else to_mm->id.u.pid = start_userspace(stack);
	unblock_signals_trace();

	if (to_mm->id.u.pid < 0) {
		ret = to_mm->id.u.pid;
		goto out_free;
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}
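
/*
 * Map the syscall stub code and the stub data page into the process'
 * address space as a special mapping.  If any step fails the process
 * cannot make syscalls, so force a SIGSEGV.
 */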
void uml_setup_stubs(struct mm_struct *mm)
{
	int err, ret;

	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) __syscall_stub_start);
	if (ret)
		goto out;

	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
	if (ret)
		goto out;

	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	/* dup_mmap already holds mmap_sem */
	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				      VM_READ | VM_MAYREAD | VM_EXEC |
				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
				      mm->context.stub_pages);
	if (err) {
		printk(KERN_ERR "install_special_mapping returned %d\n", err);
		goto out;
	}
	return;

 out:
	force_sigsegv(SIGSEGV);
}
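
/*
 * Tear down the stub mappings on exit: clear the PTEs for the stub
 * code and stub data pages, if they were established.
 */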
void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *pte;

	pte = virt_to_pte(mm, STUB_CODE);
	if (pte != NULL)
		pte_clear(mm, STUB_CODE, pte);

	pte = virt_to_pte(mm, STUB_DATA);
	if (pte == NULL)
		return;

	pte_clear(mm, STUB_DATA, pte);
}
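
/*
 * Release everything init_new_context() set up: kill the host process
 * backing this mm, then free the stub stack page and the LDT.
 */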
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	/*
	 * If init_new_context wasn't called, this will be
	 * zero, resulting in a kill(0), which will result in the
	 * whole UML suddenly dying.  Also, cover negative and
	 * 1 cases, since they shouldn't happen either.
	 */
	if (mmu->id.u.pid < 2) {
		printk(KERN_ERR "corrupt mm_context - pid = %d\n",
		       mmu->id.u.pid);
		return;
	}
	os_kill_ptraced_process(mmu->id.u.pid, 1);

	free_page(mmu->id.stack);
	free_ldt(mmu);
}