// SPDX-License-Identifier: GPL-2.0
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 *    Author(s): Carsten Otte <[email protected]>
 *               Christian Borntraeger <[email protected]>
 *               Christian Ehrhardt <[email protected]>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
  18. static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
  19. u64 *reg)
  20. {
  21. const bool stopped = kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED);
  22. int rc;
  23. int ext_call_pending;
  24. ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
  25. if (!stopped && !ext_call_pending)
  26. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  27. else {
  28. *reg &= 0xffffffff00000000UL;
  29. if (ext_call_pending)
  30. *reg |= SIGP_STATUS_EXT_CALL_PENDING;
  31. if (stopped)
  32. *reg |= SIGP_STATUS_STOPPED;
  33. rc = SIGP_CC_STATUS_STORED;
  34. }
  35. VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
  36. rc);
  37. return rc;
  38. }
  39. static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
  40. struct kvm_vcpu *dst_vcpu)
  41. {
  42. struct kvm_s390_irq irq = {
  43. .type = KVM_S390_INT_EMERGENCY,
  44. .u.emerg.code = vcpu->vcpu_id,
  45. };
  46. int rc = 0;
  47. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  48. if (!rc)
  49. VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
  50. dst_vcpu->vcpu_id);
  51. return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
  52. }
  53. static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
  54. {
  55. return __inject_sigp_emergency(vcpu, dst_vcpu);
  56. }
/*
 * CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal only when the
 * architectural preconditions are met; otherwise store "incorrect state"
 * into the status register and return CC "status stored".
 */
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
					struct kvm_vcpu *dst_vcpu,
					u16 asn, u64 *reg)
{
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	bool idle;

	/*
	 * NOTE(review): idle and stopped are tested on the *sending* vcpu,
	 * while the PSW and the ASNs are read from dst_vcpu -- confirm this
	 * asymmetry is intentional and not a vcpu/dst_vcpu mix-up.
	 */
	idle = is_vcpu_idle(vcpu);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */

	/* Inject the emergency signal? */
	if (!is_vcpu_stopped(vcpu)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || (idle && psw->addr != 0)
	    || (!idle && (asn == p_asn || asn == s_asn))) {
		return __inject_sigp_emergency(vcpu, dst_vcpu);
	} else {
		/* Preconditions not met: report "incorrect state". */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}
  81. static int __sigp_external_call(struct kvm_vcpu *vcpu,
  82. struct kvm_vcpu *dst_vcpu, u64 *reg)
  83. {
  84. struct kvm_s390_irq irq = {
  85. .type = KVM_S390_INT_EXTERNAL_CALL,
  86. .u.extcall.code = vcpu->vcpu_id,
  87. };
  88. int rc;
  89. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  90. if (rc == -EBUSY) {
  91. *reg &= 0xffffffff00000000UL;
  92. *reg |= SIGP_STATUS_EXT_CALL_PENDING;
  93. return SIGP_CC_STATUS_STORED;
  94. } else if (rc == 0) {
  95. VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
  96. dst_vcpu->vcpu_id);
  97. }
  98. return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
  99. }
  100. static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
  101. {
  102. struct kvm_s390_irq irq = {
  103. .type = KVM_S390_SIGP_STOP,
  104. };
  105. int rc;
  106. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  107. if (rc == -EBUSY)
  108. rc = SIGP_CC_BUSY;
  109. else if (rc == 0)
  110. VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
  111. dst_vcpu->vcpu_id);
  112. return rc;
  113. }
  114. static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
  115. struct kvm_vcpu *dst_vcpu, u64 *reg)
  116. {
  117. struct kvm_s390_irq irq = {
  118. .type = KVM_S390_SIGP_STOP,
  119. .u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
  120. };
  121. int rc;
  122. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  123. if (rc == -EBUSY)
  124. rc = SIGP_CC_BUSY;
  125. else if (rc == 0)
  126. VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
  127. dst_vcpu->vcpu_id);
  128. return rc;
  129. }
  130. static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
  131. u64 *status_reg)
  132. {
  133. *status_reg &= 0xffffffff00000000UL;
  134. /* Reject set arch order, with czam we're always in z/Arch mode. */
  135. *status_reg |= SIGP_STATUS_INVALID_PARAMETER;
  136. return SIGP_CC_STATUS_STORED;
  137. }
  138. static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
  139. u32 address, u64 *reg)
  140. {
  141. struct kvm_s390_irq irq = {
  142. .type = KVM_S390_SIGP_SET_PREFIX,
  143. .u.prefix.address = address & 0x7fffe000u,
  144. };
  145. int rc;
  146. /*
  147. * Make sure the new value is valid memory. We only need to check the
  148. * first page, since address is 8k aligned and memory pieces are always
  149. * at least 1MB aligned and have at least a size of 1MB.
  150. */
  151. if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
  152. *reg &= 0xffffffff00000000UL;
  153. *reg |= SIGP_STATUS_INVALID_PARAMETER;
  154. return SIGP_CC_STATUS_STORED;
  155. }
  156. rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
  157. if (rc == -EBUSY) {
  158. *reg &= 0xffffffff00000000UL;
  159. *reg |= SIGP_STATUS_INCORRECT_STATE;
  160. return SIGP_CC_STATUS_STORED;
  161. }
  162. return rc;
  163. }
  164. static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
  165. struct kvm_vcpu *dst_vcpu,
  166. u32 addr, u64 *reg)
  167. {
  168. int rc;
  169. if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) {
  170. *reg &= 0xffffffff00000000UL;
  171. *reg |= SIGP_STATUS_INCORRECT_STATE;
  172. return SIGP_CC_STATUS_STORED;
  173. }
  174. addr &= 0x7ffffe00;
  175. rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
  176. if (rc == -EFAULT) {
  177. *reg &= 0xffffffff00000000UL;
  178. *reg |= SIGP_STATUS_INVALID_PARAMETER;
  179. rc = SIGP_CC_STATUS_STORED;
  180. }
  181. return rc;
  182. }
  183. static int __sigp_sense_running(struct kvm_vcpu *vcpu,
  184. struct kvm_vcpu *dst_vcpu, u64 *reg)
  185. {
  186. int rc;
  187. if (!test_kvm_facility(vcpu->kvm, 9)) {
  188. *reg &= 0xffffffff00000000UL;
  189. *reg |= SIGP_STATUS_INVALID_ORDER;
  190. return SIGP_CC_STATUS_STORED;
  191. }
  192. if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) {
  193. /* running */
  194. rc = SIGP_CC_ORDER_CODE_ACCEPTED;
  195. } else {
  196. /* not running */
  197. *reg &= 0xffffffff00000000UL;
  198. *reg |= SIGP_STATUS_NOT_RUNNING;
  199. rc = SIGP_CC_STATUS_STORED;
  200. }
  201. VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
  202. dst_vcpu->vcpu_id, rc);
  203. return rc;
  204. }
/*
 * Prepare a SIGP START or SIGP RESTART: the order itself is completed in
 * user space (-EOPNOTSUPP), unless a STOP interrupt is still pending on
 * the destination, in which case the order is rejected as "busy".
 */
static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	/* handle (RE)START in user space */
	int rc = -EOPNOTSUPP;

	/* make sure we don't race with STOP irq injection */
	spin_lock(&li->lock);
	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
		rc = SIGP_CC_BUSY;
	spin_unlock(&li->lock);

	return rc;
}
/*
 * SIGP CPU RESET / INITIAL CPU RESET are always completed in user space;
 * -EOPNOTSUPP tells the caller to exit to the VMM.
 */
static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
				    struct kvm_vcpu *dst_vcpu, u8 order_code)
{
	/* handle (INITIAL) CPU RESET in user space */
	return -EOPNOTSUPP;
}
/*
 * Unknown SIGP orders are forwarded to user space via -EOPNOTSUPP.
 */
static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
				  struct kvm_vcpu *dst_vcpu)
{
	/* handle unknown orders in user space */
	return -EOPNOTSUPP;
}
/*
 * Dispatch a SIGP order that targets a specific CPU address.
 *
 * Returns a SIGP condition code (>= 0) to be placed in the PSW, or a
 * negative error (-EOPNOTSUPP means "complete the order in user space").
 */
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
			   u16 cpu_addr, u32 parameter, u64 *status_reg)
{
	int rc;
	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);

	/* An unknown CPU address is "not operational". */
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	/*
	 * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
	 * are processed asynchronously. Until the affected VCPU finishes
	 * its work and calls back into KVM to clear the (RESTART or STOP)
	 * interrupt, we need to return any new non-reset orders "busy".
	 *
	 * This is important because a single VCPU could issue:
	 *  1) SIGP STOP $DESTINATION
	 *  2) SIGP SENSE $DESTINATION
	 *
	 * If the SIGP SENSE would not be rejected as "busy", it could
	 * return an incorrect answer as to whether the VCPU is STOPPED
	 * or OPERATING.
	 */
	if (order_code != SIGP_INITIAL_CPU_RESET &&
	    order_code != SIGP_CPU_RESET) {
		/*
		 * Lockless check. Both SIGP STOP and SIGP (RE)START
		 * properly synchronize everything while processing
		 * their orders, while the guest cannot observe a
		 * difference when issuing other orders from two
		 * different VCPUs.
		 */
		if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
		    kvm_s390_is_restart_irq_pending(dst_vcpu))
			return SIGP_CC_BUSY;
	}

	/* Per-order dispatch; each case also bumps its statistics counter. */
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, dst_vcpu);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, dst_vcpu);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
						 status_reg);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_cond_emergency++;
		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
						  status_reg);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
	}

	if (rc == -EOPNOTSUPP)
		VCPU_EVENT(vcpu, 4,
			   "sigp order %u -> cpu %x: handled in user space",
			   order_code, dst_vcpu->vcpu_id);

	return rc;
}
/*
 * Decide whether a SIGP order should be forwarded to user space
 * (KVM_CAP_S390_USER_SIGP). Returns 1 when user space handles the order
 * (after updating the relevant statistics counter here, since kernel
 * dispatch is skipped), 0 when the kernel handles it.
 */
static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
					   u16 cpu_addr)
{
	/* Without user_sigp, everything stays in the kernel. */
	if (!vcpu->kvm->arch.user_sigp)
		return 0;

	switch (order_code) {
	/* These orders are always handled in the kernel. */
	case SIGP_SENSE:
	case SIGP_EXTERNAL_CALL:
	case SIGP_EMERGENCY_SIGNAL:
	case SIGP_COND_EMERGENCY_SIGNAL:
	case SIGP_SENSE_RUNNING:
		return 0;
	/* update counters as we're directly dropping to user space */
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop_store_status++;
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		vcpu->stat.instruction_sigp_store_status++;
		break;
	case SIGP_STORE_ADDITIONAL_STATUS:
		vcpu->stat.instruction_sigp_store_adtl_status++;
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		break;
	case SIGP_START:
		vcpu->stat.instruction_sigp_start++;
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		break;
	case SIGP_INITIAL_CPU_RESET:
		vcpu->stat.instruction_sigp_init_cpu_reset++;
		break;
	case SIGP_CPU_RESET:
		vcpu->stat.instruction_sigp_cpu_reset++;
		break;
	default:
		vcpu->stat.instruction_sigp_unknown++;
	}
	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
		   order_code, cpu_addr);

	return 1;
}
/*
 * Intercept handler for the SIGNAL PROCESSOR (SIGP) instruction.
 *
 * Decodes r1/r3 from the instruction, checks the privilege level, lets
 * user space take over if configured, dispatches the order, and finally
 * sets the PSW condition code. Returns 0 on success, a negative error
 * (including -EOPNOTSUPP for user-space handling) otherwise.
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
		return -EOPNOTSUPP;

	/* The parameter lives in the odd register of the r1 pair. */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SET_ARCHITECTURE:
		/* SET ARCHITECTURE has no destination CPU. */
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter,
				     &vcpu->run->s.regs.gprs[r1]);
		break;
	default:
		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
				     parameter,
				     &vcpu->run->s.regs.gprs[r1]);
	}

	/* Negative rc: propagate the error; otherwise rc is the CC. */
	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
/*
 * Handle SIGP partial execution interception.
 *
 * This interception will occur at the source cpu when a source cpu sends an
 * external call to a target cpu and the target cpu has the WAIT bit set in
 * its cpuflags. Interception will occur after the interrupt indicator bits at
 * the target cpu have been set. All error cases will lead to instruction
 * interception, therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	struct kvm_vcpu *dest_vcpu;
	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

	if (order_code == SIGP_EXTERNAL_CALL) {
		trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

		/* The hardware already validated the target CPU address. */
		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
		BUG_ON(dest_vcpu == NULL);

		/* Wake the target so it processes the pending external call. */
		kvm_s390_vcpu_wakeup(dest_vcpu);
		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
		return 0;
	}

	/* Any other order at PEI is handled via instruction interception. */
	return -EOPNOTSUPP;
}