// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>

/*
 * Handle a fault on a kernel virtual address (needed to implement
 * vmalloc/pkmap/fixmap). Refer to asm/processor.h for the System Memory Map.
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);
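
	/*
	 * pgd walks the faulting task's page tables, pgd_k walks the kernel's
	 * reference table (swapper pgdir): copy any entry missing in the
	 * former from the latter, level by level.
	 */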
	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (pmd_none(*pmd_k))
		goto bad_area;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;
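
	/*
	 * Classify the access from the Exception Cause Register: store/exchange
	 * faults are treated as writes, ProtV instruction-fetch faults as exec.
	 */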
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
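	/*
	 * Take mmap_lock (read) and look up the vma covering the fault;
	 * on failure the helper returns NULL without mmap_lock held, hence
	 * the bad_area_nosemaphore path below.
	 */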
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}
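
	/*
	 * Hand the fault to the core mm: it populates the page and PTE and
	 * reports the outcome (completed, retry needed, or error) in 'fault'.
	 */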
	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/*
	 * Fault retry nuances, mmap_lock already relinquished by core mm
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
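
	/*
	 * Fall-through: the fault is resolved (successfully or with an error)
	 * and mmap_lock is still held, so release it before sorting out the
	 * error cases below.
	 */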

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;
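
	/*
	 * Kernel-mode fault (or no user context): try an exception-table
	 * fixup for the faulting instruction; if there is none, the kernel
	 * itself touched a bad address, so oops.
	 */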
no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}