// SPDX-License-Identifier: GPL-2.0
/*
 * handling diagnose instructions
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/gmap.h>
#include <asm/virtio-ccw.h>
#include "kvm-s390.h"
#include "trace.h"
#include "trace-s390.h"
#include "gaccess.h"
static int diag_release_pages(struct kvm_vcpu *vcpu)
{
	unsigned long start, end;
	unsigned long prefix = kvm_s390_get_prefix(vcpu);

	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
	vcpu->stat.instruction_diagnose_10++;

	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
	    || start < 2 * PAGE_SIZE)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);

	/*
	 * We checked for start >= end above, so let's check for the
	 * fast path (no prefix swap page involved).
	 */
	if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
		gmap_discard(vcpu->arch.gmap, start, end);
	} else {
		/*
		 * This is the slow path.  gmap_discard treats ranges with
		 * start >= end as NOPs, so let's split this into before
		 * prefix, prefix, and after prefix, and let gmap_discard
		 * make some of these calls NOPs.
		 */
		gmap_discard(vcpu->arch.gmap, start, prefix);
		if (start <= prefix)
			gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
		if (end > prefix + PAGE_SIZE)
			gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
	}
	return 0;
}
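
/*
 * DIAGNOSE 0x258: page-reference services
 * Sets up (subcode 0, TOKEN) or tears down (subcode 1, CANCEL) the
 * pfault handshake, which lets the host deliver asynchronous page fault
 * notifications to the guest instead of stopping the VCPU while a host
 * page is made available.  The parameter block layout and the return
 * codes follow the z/VM CP interface description (SC24-6084).
 */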
static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
{
	struct prs_parm {
		u16 code;
		u16 subcode;
		u16 parm_len;
		u16 parm_version;
		u64 token_addr;
		u64 select_mask;
		u64 compare_mask;
		u64 zarch;
	};
	struct prs_parm parm;
	int rc;
	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);

	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
		   vcpu->run->s.regs.gprs[rx]);
	vcpu->stat.instruction_diagnose_258++;
	if (vcpu->run->s.regs.gprs[rx] & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (parm.subcode) {
	case 0: /* TOKEN */
		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
			   "select mask 0x%llx compare mask 0x%llx",
			   parm.token_addr, parm.select_mask, parm.compare_mask);
		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
			/*
			 * If the pagefault handshake is already activated,
			 * the token must not be changed.  We have to return
			 * decimal 8 instead, as mandated in SC24-6084.
			 */
			vcpu->run->s.regs.gprs[ry] = 8;
			return 0;
		}

		if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		vcpu->arch.pfault_token = parm.token_addr;
		vcpu->arch.pfault_select = parm.select_mask;
		vcpu->arch.pfault_compare = parm.compare_mask;
		vcpu->run->s.regs.gprs[ry] = 0;
		rc = 0;
		break;
	case 1: /*
		 * CANCEL
		 * The specification allows already pending tokens to survive
		 * the cancel.  Therefore, to reduce code complexity, we
		 * assume all outstanding tokens are already pending.
		 */
		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
		if (parm.token_addr || parm.select_mask ||
		    parm.compare_mask || parm.zarch)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

		vcpu->run->s.regs.gprs[ry] = 0;
		/*
		 * If the pfault handling was not established or is already
		 * canceled, SC24-6084 requests to return decimal 4.
		 */
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			vcpu->run->s.regs.gprs[ry] = 4;
		else
			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;

		rc = 0;
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}
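
/*
 * DIAGNOSE 0x44: voluntary time slice end
 * The guest gives up the remainder of its time slice; this is mapped to
 * kvm_vcpu_on_spin(), which tries to hand the physical CPU to another
 * VCPU of the same VM.
 */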
static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
	vcpu->stat.instruction_diagnose_44++;
	kvm_vcpu_on_spin(vcpu, true);
	return 0;
}
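
/*
 * Budget for forwarded directed yields: at most diag9c_forwarding_hz
 * yields per second (diag9c_forwarding_hz / HZ per jiffy "slice") are
 * forwarded to the host scheduler.  The counters below are global and
 * accessed without locking; the budget is an approximation, so an
 * occasional race on forward_cnt/cur_slice is acceptable.
 */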
static int forward_cnt;
static unsigned long cur_slice;

static int diag9c_forwarding_overrun(void)
{
	/* Reset the count on a new slice */
	if (time_after(jiffies, cur_slice)) {
		cur_slice = jiffies;
		forward_cnt = diag9c_forwarding_hz / HZ;
	}
	return forward_cnt-- <= 0 ? 1 : 0;
}
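
/*
 * DIAGNOSE 0x9c: directed time slice end
 * The guest asks to give the remainder of its time slice to the VCPU
 * whose id is in the rx register.  If that VCPU is not running, yield
 * to it; if it is running but its host CPU was preempted, optionally
 * forward the yield to that host CPU, subject to the budget above.
 */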
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tcpu;
	int tcpu_cpu;
	int tid;

	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
	vcpu->stat.instruction_diagnose_9c++;

	/* yield to self */
	if (tid == vcpu->vcpu_id)
		goto no_yield;

	/* yield to invalid */
	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
	if (!tcpu)
		goto no_yield;

	/* target guest VCPU already running */
	tcpu_cpu = READ_ONCE(tcpu->cpu);
	if (tcpu_cpu >= 0) {
		if (!diag9c_forwarding_hz || diag9c_forwarding_overrun())
			goto no_yield;

		/* target host CPU already running */
		if (!vcpu_is_preempted(tcpu_cpu))
			goto no_yield;
		smp_yield_cpu(tcpu_cpu);
		VCPU_EVENT(vcpu, 5,
			   "diag time slice end directed to %d: yield forwarded",
			   tid);
		vcpu->stat.diag_9c_forward++;
		return 0;
	}

	if (kvm_vcpu_yield_to(tcpu) <= 0)
		goto no_yield;

	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
	return 0;
no_yield:
	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
	vcpu->stat.diag_9c_ignored++;
	return 0;
}
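
/*
 * DIAGNOSE 0x308: IPL functions
 * Subcode 3 requests a reIPL with clear, subcode 4 a reIPL without
 * clear.  KVM cannot perform the IPL itself: it stops the VCPU (unless
 * userspace controls the CPU state), records the requested reset flags,
 * and exits to userspace with KVM_EXIT_S390_RESET so that userspace can
 * carry out the reboot.
 */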
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
	vcpu->stat.instruction_diagnose_308++;
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/*
	 * No need to check the return value of kvm_s390_vcpu_stop(): it can
	 * only fail for protvirt, but protvirt implies user cpu state
	 * control, so the call is skipped in that case anyway.
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
		   vcpu->run->s390_reset_flags);
	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
	return -EREMOTE;
}
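
/*
 * DIAGNOSE 0x500: KVM hypercall
 * Function code KVM_S390_VIRTIO_CCW_NOTIFY (in gpr 1) is the virtio-ccw
 * "kick": the guest notifies the host that a virtqueue needs processing.
 * The notification is routed to a matching handler registered on
 * KVM_VIRTIO_CCW_NOTIFY_BUS (typically an ioeventfd), so it usually does
 * not have to go out to userspace.
 */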
static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->stat.instruction_diagnose_500++;
	/* No virtio-ccw notification? Get out quickly. */
	if (!vcpu->kvm->arch.css_support ||
	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
		return -EOPNOTSUPP;

	VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
		   (u32) vcpu->run->s.regs.gprs[2],
		   (u32) vcpu->run->s.regs.gprs[3],
		   vcpu->run->s.regs.gprs[4]);

	/*
	 * The layout is as follows:
	 * - gpr 2 contains the subchannel id (passed as addr)
	 * - gpr 3 contains the virtqueue index (passed as datamatch)
	 * - gpr 4 contains the index on the bus (optionally)
	 */
	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
				      8, &vcpu->run->s.regs.gprs[3],
				      vcpu->run->s.regs.gprs[4]);

	/*
	 * Return cookie in gpr 2, but don't overwrite the register if the
	 * diagnose will be handled by userspace.
	 */
	if (ret != -EOPNOTSUPP)
		vcpu->run->s.regs.gprs[2] = ret;
	/* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
	return ret < 0 ? ret : 0;
}
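
/*
 * For reference, the guest side of this hypercall looks roughly like the
 * sketch below (modelled on the notify helper in the guest virtio-ccw
 * driver, drivers/s390/virtio/virtio_ccw.c).  It is illustrative only
 * and not part of this file, hence the #if 0 guard.
 */
#if 0
static inline long do_kvm_notify(u32 schid, unsigned long queue_index,
				 long cookie)
{
	/* gpr 1: function code; gprs 2-4: schid, queue index, cookie */
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register unsigned long __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __cookie asm("4") = cookie;
	register long __rc asm("2");

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc)
		      : "d" (__nr), "d" (__schid), "d" (__index), "d" (__cookie)
		      : "memory", "cc");
	/* the cookie the host wrote back to gpr 2 is returned here */
	return __rc;
}
#endif

/*
 * Entry point for all diagnose intercepts.  DIAGNOSE is privileged, so a
 * problem-state guest gets a privileged-operation exception; unhandled
 * function codes are reported to the caller as -EOPNOTSUPP.
 */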
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
	int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	trace_kvm_s390_handle_diag(vcpu, code);
	switch (code) {
	case 0x10:
		return diag_release_pages(vcpu);
	case 0x44:
		return __diag_time_slice_end(vcpu);
	case 0x9c:
		return __diag_time_slice_end_directed(vcpu);
	case 0x258:
		return __diag_page_ref_service(vcpu);
	case 0x308:
		return __diag_ipl_functions(vcpu);
	case 0x500:
		return __diag_virtio_hypercall(vcpu);
	default:
		vcpu->stat.instruction_diagnose_other++;
		return -EOPNOTSUPP;
	}
}