// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner ([email protected])
 *               Ulrich Weigand ([email protected])
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

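/*
 * The translation-exception identification (TEID) arrives in
 * regs->int_parm_long: the failing address lives in the upper bits
 * (hence the -4096L mask below), the low 12 bits carry status flags.
 * The VM_FAULT_* values below are s390-private fault codes, chosen
 * above the range used by the generic VM_FAULT_* flags, and are used
 * to route handling in do_fault_error().
 */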
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

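/*
 * With the store-indication facility (facility 75) installed, two
 * TEID bits (mask 0xc00) encode whether the faulting access was a
 * fetch or a store; do_exception() uses this to set FAULT_FLAG_WRITE
 * without having to decode the faulting instruction.
 */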
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup && ex_handle(fixup, regs))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/*
	 * Low-address protection hit in kernel mode means
	 * NULL pointer write access in kernel mode.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

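/*
 * An exec-permission fault at the return address may be a signal
 * return trampoline placed on the stack: 0x0a77 is "svc 0x77"
 * (sigreturn, syscall 119) and 0x0aad is "svc 0xad" (rt_sigreturn,
 * syscall 173). Re-issue the trampoline as a system call instead of
 * delivering SIGSEGV.
 */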
static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		fallthrough;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);
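	/*
	 * For a guest (gmap) fault the faulting address is a guest
	 * address; translate it to the corresponding address in the
	 * host's user address space before walking the VMAs. With
	 * pfault enabled, ask handle_mm_fault() not to block, so the
	 * guest can be told to schedule another task instead.
	 */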
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/*
				 * FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_lock has not been released
				 */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			mmap_read_lock(mm);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
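	/*
	 * The instruction-execution-protection facility signals a
	 * fetch from a noexec page by setting bit 0x80 in the TEID.
	 * Rewrite the failing address to the page containing the PSW
	 * address and report an exec-permission error directly,
	 * without consulting the VMA.
	 */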
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);

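/*
 * Parameter block for DIAG 0x258. reffcode selects the function:
 * 0 enables pfault handshaking (pfault_init()), 1 cancels it again
 * (pfault_fini()). refgaddr points the host at the lowcore LPP field;
 * the token delivered with each pfault interrupt is the pid stored
 * there.
 */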
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

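/*
 * List of tasks that went to sleep after a pfault initial interrupt
 * and still wait for the matching completion interrupt, protected by
 * pfault_lock.
 */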
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happen within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/*
			 * Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults.
			 */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/*
			 * Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts.
			 */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/*
			 * Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit.
			 */
			tsk->thread.pfault_wait = 0;
		} else {
			/*
			 * Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached.
			 */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/*
			 * Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block.
			 */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)
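/*
 * Exception handlers for protected (secure) guests running under the
 * Ultravisor: a secure-storage-access exception is raised when
 * non-secure code touches a page the Ultravisor has marked secure,
 * and the page must be made accessible (exported) before the access
 * can succeed.
 */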
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case VDSO_FAULT:
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

#else
void do_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}

void do_non_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}
#endif