intercept.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * in-kernel handling for sie intercepts
  4. *
  5. * Copyright IBM Corp. 2008, 2020
  6. *
  7. * Author(s): Carsten Otte <[email protected]>
  8. * Christian Borntraeger <[email protected]>
  9. */
  10. #include <linux/kvm_host.h>
  11. #include <linux/errno.h>
  12. #include <linux/pagemap.h>
  13. #include <asm/asm-offsets.h>
  14. #include <asm/irq.h>
  15. #include <asm/sysinfo.h>
  16. #include <asm/uv.h>
  17. #include "kvm-s390.h"
  18. #include "gaccess.h"
  19. #include "trace.h"
  20. #include "trace-s390.h"
  21. u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
  22. {
  23. struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
  24. u8 ilen = 0;
  25. switch (vcpu->arch.sie_block->icptcode) {
  26. case ICPT_INST:
  27. case ICPT_INSTPROGI:
  28. case ICPT_OPEREXC:
  29. case ICPT_PARTEXEC:
  30. case ICPT_IOINST:
  31. /* instruction only stored for these icptcodes */
  32. ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
  33. /* Use the length of the EXECUTE instruction if necessary */
  34. if (sie_block->icptstatus & 1) {
  35. ilen = (sie_block->icptstatus >> 4) & 0x6;
  36. if (!ilen)
  37. ilen = 4;
  38. }
  39. break;
  40. case ICPT_PROGI:
  41. /* bit 1+2 of pgmilc are the ilc, so we directly get ilen */
  42. ilen = vcpu->arch.sie_block->pgmilc & 0x6;
  43. break;
  44. }
  45. return ilen;
  46. }
/*
 * handle_stop - handle a SIGP STOP intercept for this vCPU
 *
 * Returns 0 when the stop must be delayed or was already handled,
 * a negative error code if storing the status failed, and
 * -EOPNOTSUPP after a successful stop so the caller exits to userspace.
 */
static int handle_stop(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	uint8_t flags, stop_pending;

	vcpu->stat.exit_stop_request++;

	/* delay the stop if any non-stop irq is pending */
	if (kvm_s390_vcpu_has_irq(vcpu, 1))
		return 0;

	/* avoid races with the injection/SIGP STOP code */
	spin_lock(&li->lock);
	/* snapshot flags and pending state atomically under the lock */
	flags = li->irq.stop.flags;
	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
	spin_unlock(&li->lock);

	trace_kvm_s390_stop_request(stop_pending, flags);
	/* the stop request may have been withdrawn in the meantime */
	if (!stop_pending)
		return 0;

	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		if (rc)
			return rc;
	}

	/*
	 * no need to check the return value of vcpu_stop as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	return -EOPNOTSUPP;
}
/*
 * handle_validity - trace and report a validity intercept
 *
 * The validity reason code (viwhy) is taken from the upper half of the
 * instruction-parameter field.  Always returns -EINVAL.
 */
static int handle_validity(struct kvm_vcpu *vcpu)
{
	int viwhy = vcpu->arch.sie_block->ipb >> 16;

	vcpu->stat.exit_validity++;
	trace_kvm_s390_intercept_validity(vcpu, viwhy);
	KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
		  current->pid, vcpu->kvm);

	/* do not warn on invalid runtime instrumentation mode */
	WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
		  viwhy);
	return -EINVAL;
}
  90. static int handle_instruction(struct kvm_vcpu *vcpu)
  91. {
  92. vcpu->stat.exit_instruction++;
  93. trace_kvm_s390_intercept_instruction(vcpu,
  94. vcpu->arch.sie_block->ipa,
  95. vcpu->arch.sie_block->ipb);
  96. switch (vcpu->arch.sie_block->ipa >> 8) {
  97. case 0x01:
  98. return kvm_s390_handle_01(vcpu);
  99. case 0x82:
  100. return kvm_s390_handle_lpsw(vcpu);
  101. case 0x83:
  102. return kvm_s390_handle_diag(vcpu);
  103. case 0xaa:
  104. return kvm_s390_handle_aa(vcpu);
  105. case 0xae:
  106. return kvm_s390_handle_sigp(vcpu);
  107. case 0xb2:
  108. return kvm_s390_handle_b2(vcpu);
  109. case 0xb6:
  110. return kvm_s390_handle_stctl(vcpu);
  111. case 0xb7:
  112. return kvm_s390_handle_lctl(vcpu);
  113. case 0xb9:
  114. return kvm_s390_handle_b9(vcpu);
  115. case 0xe3:
  116. return kvm_s390_handle_e3(vcpu);
  117. case 0xe5:
  118. return kvm_s390_handle_e5(vcpu);
  119. case 0xeb:
  120. return kvm_s390_handle_eb(vcpu);
  121. default:
  122. return -EOPNOTSUPP;
  123. }
  124. }
/*
 * inject_prog_on_prog_intercept - re-inject a program interrupt into the guest
 *
 * Translates the program-interruption parameters stored in the SIE block
 * into a struct kvm_s390_pgm_info and injects it.  Which auxiliary fields
 * are valid depends on the interruption code (iprcc).
 */
static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = vcpu->arch.sie_block->iprcc,
		/* the PSW has already been rewound */
		.flags = KVM_S390_PGM_FLAGS_NO_REWIND,
	};

	/* PER may be set on top of any code; mask it out for the switch */
	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
	/* these codes carry a translation-exception identification */
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		break;
	/* these codes carry an exception access identification */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	/* these codes carry translation-exception code plus access ids */
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		pgm_info.op_access_id = vcpu->arch.sie_block->oai;
		break;
	case PGM_MONITOR:
		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
		break;
	/* these codes carry a data-exception code */
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
		break;
	case PGM_PROTECTION:
		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
		break;
	default:
		break;
	}

	/* PER information is supplied in addition to any of the above */
	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
		pgm_info.per_code = vcpu->arch.sie_block->perc;
		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
	}
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
  186. /*
  187. * restore ITDB to program-interruption TDB in guest lowcore
  188. * and set TX abort indication if required
  189. */
  190. static int handle_itdb(struct kvm_vcpu *vcpu)
  191. {
  192. struct kvm_s390_itdb *itdb;
  193. int rc;
  194. if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
  195. return 0;
  196. if (current->thread.per_flags & PER_FLAG_NO_TE)
  197. return 0;
  198. itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
  199. rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
  200. if (rc)
  201. return rc;
  202. memset(itdb, 0, sizeof(*itdb));
  203. return 0;
  204. }
  205. #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
/*
 * handle_prog - handle a program-interruption intercept
 *
 * Optionally filters PER events when guest debugging is active, guards
 * against endless specification-exception loops, restores the ITDB and
 * finally re-injects the program interrupt into the guest.
 */
static int handle_prog(struct kvm_vcpu *vcpu)
{
	psw_t psw;
	int rc;

	vcpu->stat.exit_program_interruption++;

	/*
	 * Intercept 8 indicates a loop of specification exceptions
	 * for protected guests.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EOPNOTSUPP;

	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
		rc = kvm_s390_handle_per_event(vcpu);
		if (rc)
			return rc;
		/* the interrupt might have been filtered out completely */
		if (vcpu->arch.sie_block->iprcc == 0)
			return 0;
	}

	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
		/* inspect the pgm new PSW the guest would branch to */
		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
		if (rc)
			return rc;
		/* Avoid endless loops of specification exceptions */
		if (!is_valid_psw(&psw))
			return -EOPNOTSUPP;
	}

	/* restore the ITDB to the guest lowcore before injecting */
	rc = handle_itdb(vcpu);
	if (rc)
		return rc;

	return inject_prog_on_prog_intercept(vcpu);
}
  239. /**
  240. * handle_external_interrupt - used for external interruption interceptions
  241. * @vcpu: virtual cpu
  242. *
  243. * This interception occurs if:
  244. * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
  245. * occurred. In this case, the interrupt needs to be injected manually to
  246. * preserve interrupt priority.
  247. * - the external new PSW has external interrupts enabled, which will cause an
  248. * interruption loop. We drop to userspace in this case.
  249. *
  250. * The latter case can be detected by inspecting the external mask bit in the
  251. * external new psw.
  252. *
  253. * Under PV, only the latter case can occur, since interrupt priorities are
  254. * handled in the ultravisor.
  255. */
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	u16 eic = vcpu->arch.sie_block->eic;
	struct kvm_s390_irq irq;
	psw_t newpsw;
	int rc;

	vcpu->stat.exit_external_interrupt++;

	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		/* under PV the guest PSW already reflects the ext new PSW */
		newpsw = vcpu->arch.sie_block->gpsw;
	} else {
		rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
		if (rc)
			return rc;
	}

	/*
	 * Clock comparator or timer interrupt with external interrupt enabled
	 * will cause interrupt loop. Drop to userspace.
	 */
	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	    (newpsw.mask & PSW_MASK_EXT))
		return -EOPNOTSUPP;

	/* only irq.type (and extcall.code) are filled; other fields unused */
	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		irq.type = KVM_S390_INT_CLOCK_COMP;
		break;
	case EXT_IRQ_CPU_TIMER:
		irq.type = KVM_S390_INT_CPU_TIMER;
		break;
	case EXT_IRQ_EXTERNAL_CALL:
		irq.type = KVM_S390_INT_EXTERNAL_CALL;
		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
		rc = kvm_s390_inject_vcpu(vcpu, &irq);
		/* ignore if another external call is already pending */
		if (rc == -EBUSY)
			return 0;
		return rc;
	default:
		return -EOPNOTSUPP;
	}

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
  297. /**
  298. * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
  299. * @vcpu: virtual cpu
  300. *
  301. * This interception can only happen for guests with DAT disabled and
  302. * addresses that are currently not mapped in the host. Thus we try to
  303. * set up the mappings for the corresponding user pages here (or throw
  304. * addressing exceptions in case of illegal guest addresses).
  305. */
static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
{
	unsigned long srcaddr, dstaddr;
	int reg1, reg2, rc;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* Ensure that the source is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
					      reg2, &srcaddr, GACC_FETCH, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
	if (rc != 0)
		return rc;

	/* Ensure that the destination is paged-in, no actual access -> no key checking */
	rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
					      reg1, &dstaddr, GACC_STORE, 0);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
	if (rc != 0)
		return rc;

	/* both pages mapped: let the guest re-execute MVPG */
	kvm_s390_retry_instr(vcpu);

	return 0;
}
  330. static int handle_partial_execution(struct kvm_vcpu *vcpu)
  331. {
  332. vcpu->stat.exit_pei++;
  333. if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
  334. return handle_mvpg_pei(vcpu);
  335. if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
  336. return kvm_s390_handle_sigp_pei(vcpu);
  337. return -EOPNOTSUPP;
  338. }
  339. /*
  340. * Handle the sthyi instruction that provides the guest with system
  341. * information, like current CPU resources available at each level of
  342. * the machine.
  343. */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, cc = 0, r = 0;
	u64 code, addr, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	/* facility 74 (STHYI) must be provided to the guest */
	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	/* operands must be distinct even-numbered registers */
	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* unsupported function code: set cc 3 / rc 4, no buffer needed */
	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	/* for non-PV guests the response address must be page aligned */
	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);
	if (cc < 0) {
		free_page((unsigned long)sctns);
		return cc;
	}
out:
	if (!cc) {
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			/* PV: response goes through the secure-instruction-data area */
			memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
			       sctns, PAGE_SIZE);
		} else {
			r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
			if (r) {
				free_page((unsigned long)sctns);
				return kvm_s390_inject_prog_cond(vcpu, r);
			}
		}
	}
	/* NOTE: sctns may be NULL here (early goto out); free_page(0) is a no-op */
	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}
/*
 * handle_operexc - handle an operation-exception intercept
 *
 * Emulates STHYI in-kernel, optionally defers instruction 0x0000 to
 * userspace, guards against operation-exception loops and otherwise
 * injects an operation exception into the guest.
 */
static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
	int rc;

	vcpu->stat.exit_operation_exception++;
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

	/* 0xb256 is STHYI, which we emulate in the kernel */
	if (vcpu->arch.sie_block->ipa == 0xb256)
		return handle_sthyi(vcpu);

	/* userspace asked to handle instruction 0x0000 itself */
	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
		return -EOPNOTSUPP;

	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
	if (rc)
		return rc;

	/*
	 * Avoid endless loops of operation exceptions, if the pgm new
	 * PSW will cause a new operation exception.
	 * The heuristic checks if the pgm new psw is within 6 bytes before
	 * the faulting psw address (with same DAT, AS settings) and the
	 * new psw is not a wait psw and the fault was not triggered by
	 * problem state.
	 */
	oldpsw = vcpu->arch.sie_block->gpsw;
	if (oldpsw.addr - newpsw.addr <= 6 &&
	    !(newpsw.mask & PSW_MASK_WAIT) &&
	    !(oldpsw.mask & PSW_MASK_PSTATE) &&
	    (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
	    (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
		return -EOPNOTSUPP;

	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}
  423. static int handle_pv_spx(struct kvm_vcpu *vcpu)
  424. {
  425. u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
  426. kvm_s390_set_prefix(vcpu, pref);
  427. trace_kvm_s390_handle_prefix(vcpu, 1, pref);
  428. return 0;
  429. }
/*
 * handle_pv_sclp - queue a service-signal interrupt for a protected guest
 *
 * Marks a service interrupt pending with a fake SCCB address; see the
 * comment below for why the real SCCB value is not needed.
 */
static int handle_pv_sclp(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	/*
	 * 2 cases:
	 * a: an sccb answering interrupt was already pending or in flight.
	 *    As the sccb value is not known we can simply set some value to
	 *    trigger delivery of a saved SCCB. UV will then use its saved
	 *    copy of the SCCB value.
	 * b: an error SCCB interrupt needs to be injected so we also inject
	 *    a fake SCCB address. Firmware will use the proper one.
	 * This makes sure, that both errors and real sccb returns will only
	 * be delivered after a notification intercept (instruction has
	 * finished) but not after others.
	 */
	fi->srv_signal.ext_params |= 0x43000;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);
	return 0;
}
/*
 * handle_pv_uvc - handle an ultravisor-call notification intercept
 *
 * Only UVC_CMD_REMOVE_SHARED_ACCESS is expected here; it is translated
 * into an unpin of the shared page at the guest address taken from the
 * guest's UV control block in the SIDA.
 */
static int handle_pv_uvc(struct kvm_vcpu *vcpu)
{
	struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
	struct uv_cb_cts uvcb = {
		.header.cmd	= UVC_CMD_UNPIN_PAGE_SHARED,
		.header.len	= sizeof(uvcb),
		.guest_handle	= kvm_s390_pv_get_handle(vcpu->kvm),
		.gaddr		= guest_uvcb->paddr,
	};
	int rc;

	if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
		WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
			  guest_uvcb->header.cmd);
		return 0;
	}

	rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
	/*
	 * If the unpin did not succeed, the guest will exit again for the UVC
	 * and we will retry the unpin.
	 */
	if (rc == -EINVAL)
		return 0;
	/*
	 * If we got -EAGAIN here, we simply return it. It will eventually
	 * get propagated all the way to userspace, which should then try
	 * again.
	 */
	return rc;
}
  481. static int handle_pv_notification(struct kvm_vcpu *vcpu)
  482. {
  483. int ret;
  484. if (vcpu->arch.sie_block->ipa == 0xb210)
  485. return handle_pv_spx(vcpu);
  486. if (vcpu->arch.sie_block->ipa == 0xb220)
  487. return handle_pv_sclp(vcpu);
  488. if (vcpu->arch.sie_block->ipa == 0xb9a4)
  489. return handle_pv_uvc(vcpu);
  490. if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
  491. /*
  492. * Besides external call, other SIGP orders also cause a
  493. * 108 (pv notify) intercept. In contrast to external call,
  494. * these orders need to be emulated and hence the appropriate
  495. * place to handle them is in handle_instruction().
  496. * So first try kvm_s390_handle_sigp_pei() and if that isn't
  497. * successful, go on with handle_instruction().
  498. */
  499. ret = kvm_s390_handle_sigp_pei(vcpu);
  500. if (!ret)
  501. return ret;
  502. }
  503. return handle_instruction(vcpu);
  504. }
/*
 * kvm_handle_sie_intercept - top-level dispatcher for SIE intercepts
 *
 * Routes the intercept code to the matching handler and, for the codes
 * that fall through the switch, additionally processes a pending PER
 * instruction-fetch event.  Returns 0 to re-enter the guest, a negative
 * error (notably -EOPNOTSUPP to exit to userspace) otherwise.
 */
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
	int rc, per_rc = 0;

	/* user-controlled VMs handle intercepts in userspace */
	if (kvm_is_ucontrol(vcpu->kvm))
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_EXTREQ:
		vcpu->stat.exit_external_request++;
		return 0;
	case ICPT_IOREQ:
		vcpu->stat.exit_io_request++;
		return 0;
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case ICPT_STOP:
		return handle_stop(vcpu);
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		rc = kvm_s390_skey_check_enable(vcpu);
		break;
	case ICPT_MCHKREQ:
	case ICPT_INT_ENABLE:
		/*
		 * PSW bit 13 or a CR (0, 6, 14) changed and we might
		 * now be able to deliver interrupts. The pre-run code
		 * will take care of this.
		 */
		rc = 0;
		break;
	case ICPT_PV_INSTR:
		rc = handle_instruction(vcpu);
		break;
	case ICPT_PV_NOTIFY:
		rc = handle_pv_notification(vcpu);
		break;
	case ICPT_PV_PREF:
		rc = 0;
		/* re-import the prefix pages into the protected guest */
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu));
		gmap_convert_to_secure(vcpu->arch.gmap,
				       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* process PER, also if the instruction is processed in user space */
	if (vcpu->arch.sie_block->icptstatus & 0x02 &&
	    (!rc || rc == -EOPNOTSUPP))
		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
	return per_rc ? per_rc : rc;
}