// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>

#include <asm/mmu.h>
/*
 * Handle a fault on a kernel virtual address, as needed to implement
 * vmalloc/pkmap/fixmap (refer to asm/processor.h for the System Memory Map).
 *
 * It simply copies the PMD entry (pointer to a 2nd level page table or a
 * hugepage) from the swapper pgdir to the task pgdir; the 2nd level
 * table/page is thus shared.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);
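	/*
	 * The same copy-down pattern repeats at each lower level. Note: on
	 * configurations where intermediate levels are folded (e.g. a 2-level
	 * setup), the p4d/pud helpers collapse to no-ops and only the
	 * pgd/pmd copies do real work.
	 */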
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (pmd_none(*pmd_k))
		goto bad_area;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	/* XXX: create the TLB entry here */
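	/*
	 * Not creating the TLB entry here should be harmless: returning
	 * retries the faulting access, and the TLB miss path then finds
	 * the synced entry when it walks the task's page tables.
	 */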
	return 0;

bad_area:
	return 1;
}
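/*
 * Top-level page fault entry point: @address is the faulting virtual
 * address captured by the low-level exception entry code, @regs the
 * register file saved at the time of the fault.
 */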
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */
	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}
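	/*
	 * A user-mode access to a kernel address deliberately skips the sync
	 * above: it finds no VMA below and is reported as SEGV_MAPERR.
	 */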
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;
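	/*
	 * The ECR (Exception Cause Register) vector/cause classify the
	 * access: stores (incl. EX atomics) count as writes, a ProtV on
	 * instruction fetch as exec; plain loads leave both clear and are
	 * checked against VM_READ below.
	 */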
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
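	/*
	 * This counts every fault exactly once; major/minor accounting is
	 * done by the core mm in handle_mm_fault(), which is why regs is
	 * passed down to it.
	 */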
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;
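	/*
	 * lock_mm_and_find_vma() takes mmap_lock for read and also handles
	 * stack expansion; on failure it returns NULL with the lock already
	 * dropped, hence the _nosemaphore exit above.
	 */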
	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}
	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}
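	/*
	 * When a signal interrupts the fault, core mm has already dropped
	 * mmap_lock: user faults just return to deliver the signal, kernel
	 * faults must still run the exception fixup.
	 */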
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;
	/*
	 * Fault retry nuances, mmap_lock already relinquished by core mm
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/*
	 * All non-retry paths converge here (in case of retry we only land
	 * here once); nothing left to do if the fault was handled.
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;
	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}
	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;
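	/*
	 * Kernel-mode fault with no other recovery: consult the exception
	 * table (e.g. faults in copy_{to,from}_user()) before oops-ing.
	 */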
no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}