// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s): David Hildenbrand <[email protected]>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * the backup info for machine check. ensure it's at
	 * the same offset as that in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally, should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	/* calculated guest addresses of satellite control blocks */
	gpa_t sca_gpa;				/* 0x0230 */
	gpa_t itdba_gpa;			/* 0x0238 */
	gpa_t gvrd_gpa;				/* 0x0240 */
	gpa_t riccbd_gpa;			/* 0x0248 */
	gpa_t sdnx_gpa;				/* 0x0250 */
	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};

/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}
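
/*
 * Example (illustrative): the reason code ends up in the upper halfword
 * of ipb, e.g. for the prefix mapping failure reported by map_prefix()
 * below:
 *
 *	set_validity_icpt(scb_s, 0x0037U);
 *	-> scb_s->ipa == 0x1000, scb_s->ipb == 0x00370000
 */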

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}
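
/*
 * Illustrative note: this is the kick protocol used by
 * kvm_s390_vsie_gmap_notifier() below. PROG_REQUEST blocks re-entry into
 * the vSIE, the STOP interrupt request forces an exit if the shadow scb
 * is currently executing (PROG_IN_SIE), and we spin until the vSIE has
 * actually been left.
 */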

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}

/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}

/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
			unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
{
	struct kvm_s390_apcb0 tmp;

	if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;

	return 0;
}

/**
 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original apcb in the guest2
 * @apcb_h: pointer to start of apcb in the guest1
 *
 * Returns 0 on success and -EFAULT on error reading the guest apcb
 */
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o, unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));

	return 0;
}

/**
 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original guest apcb
 * @apcb_h: pointer to start of apcb in the host
 *
 * Returns 0 on success and -EFAULT on error reading the guest apcb
 */
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o,
			unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb1)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));

	return 0;
}

/**
 * setup_apcb - Create a shadow copy of the apcb.
 * @vcpu: pointer to the virtual CPU
 * @crycb_s: pointer to shadow crycb
 * @crycb_o: pointer to original guest crycb
 * @crycb_h: pointer to the host crycb
 * @fmt_o: format of the original guest crycb.
 * @fmt_h: format of the host crycb.
 *
 * Checks the compatibility between the guest and host crycb and calls the
 * appropriate copy function.
 *
 * Returns 0 on success or an error number if the guest and host crycb
 * are incompatible.
 */
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
		      const u32 crycb_o,
		      struct kvm_s390_crypto_cb *crycb_h,
		      int fmt_o, int fmt_h)
{
	struct kvm_s390_crypto_cb *crycb;

	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;

	switch (fmt_o) {
	case CRYCB_FORMAT2:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
			return -EACCES;
		if (fmt_h != CRYCB_FORMAT2)
			return -EINVAL;
		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
				    (unsigned long) &crycb->apcb1,
				    (unsigned long *)&crycb_h->apcb1);
	case CRYCB_FORMAT1:
		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    (unsigned long) &crycb->apcb0,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    (unsigned long) &crycb->apcb0,
					    (unsigned long *) &crycb_h->apcb0);
		}
		break;
	case CRYCB_FORMAT0:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
			return -EACCES;
		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    (unsigned long) &crycb->apcb0,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
		case CRYCB_FORMAT0:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    (unsigned long) &crycb->apcb0,
					    (unsigned long *) &crycb_h->apcb0);
		}
	}
	return -EINVAL;
}

/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.
 * There is nothing to do for format-0.
 *
 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;
	u32 ecd_flags;
	int apie_h;
	int apie_s;
	int key_msk = test_kvm_facility(vcpu->kvm, 76);
	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
	int ret = 0;

	scb_s->crycbd = 0;

	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
	apie_s = apie_h & scb_o->eca;
	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
		return 0;

	if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	if (fmt_o == CRYCB_FORMAT1)
		if ((crycb_addr & PAGE_MASK) !=
		    ((crycb_addr + 128) & PAGE_MASK))
			return set_validity_icpt(scb_s, 0x003CU);

	if (apie_s) {
		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
				 vcpu->kvm->arch.crypto.crycb,
				 fmt_o, fmt_h);
		if (ret)
			goto end;
		scb_s->eca |= scb_o->eca & ECA_APIE;
	}

	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
	if (!ecb3_flags && !ecd_flags)
		goto end;

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72,
			    vsie_page->crycb.dea_wrapping_key_mask, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;
	scb_s->ecd |= ecd_flags;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);

end:
	switch (ret) {
	case -EINVAL:
		return set_validity_icpt(scb_s, 0x0022U);
	case -EFAULT:
		return set_validity_icpt(scb_s, 0x0035U);
	case -EACCES:
		return set_validity_icpt(scb_s, 0x003CU);
	}
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
	return 0;
}
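
/*
 * Note (illustrative): the cast of &vsie_page->crycb to __u32 above is
 * safe because vsie pages are allocated with GFP_DMA (see
 * get_vsie_page()), i.e. below 2 GB, so the shadow crycb address always
 * fits into the 31-bit crycb designation.
 */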

/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}
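
/*
 * Example with illustrative values: given a machine minimum ibc of
 * 0x0a00 and a guest 2 model ibc of 0x0b10, a guest 3 request of 0x0900
 * is rounded up to 0x0a00, and a request of 0x0c00 is rounded down to
 * 0x0b10, avoiding an out-of-range ibc validity icpt.
 */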

/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82)) {
		scb_o->fpf &= ~FPF_BPBC;
		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
	}

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}

/*
 * Setup the shadow scb by copying and checking the relevant parts of the g2
 * provided scb.
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;
	scb_s->fpf = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We have to definitely flush the tlb if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/*
	 * CPU Topology
	 * This facility only uses the utility field of the SCA and none of
	 * the cpu entries that are problematic with the other interpretation
	 * facilities so we can pass it through
	 */
	if (test_kvm_facility(vcpu->kvm, 11))
		scb_s->ecb |= scb_o->ecb & ECB_PTF;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* specification exception interpretation */
	scb_s->ecb |= scb_o->ecb & ECB_SPECI;
	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139)) {
		scb_s->ecd |= scb_o->ecd & ECD_MEF;
		scb_s->epdx = scb_o->epdx;
	}

	/* etoken */
	if (test_kvm_facility(vcpu->kvm, 156))
		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;

	scb_s->hpid = HPID_VSIE;
	scb_s->cpnc = scb_o->cpnc;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}

/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE, NULL);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))
		return -EINVAL;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}

/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
		vsie_page->sca_gpa = 0;
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
		vsie_page->itdba_gpa = 0;
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
		vsie_page->gvrd_gpa = 0;
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
		vsie_page->riccbd_gpa = 0;
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
		vsie_page->sdnx_gpa = 0;
		scb_s->sdnxo = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
		if (gpa < 2 * PAGE_SIZE)
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		vsie_page->sca_gpa = gpa;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		vsie_page->itdba_gpa = gpa;
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes vector registers cannot cross page boundaries
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		vsie_page->gvrd_gpa = gpa;
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		vsie_page->riccbd_gpa = gpa;
		scb_s->riccbd = hpa;
	}

	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
	    (scb_s->ecd & ECD_ETOKENF)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/*
		 * Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		vsie_page->sdnx_gpa = gpa;
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}

/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return 0;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}
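
/*
 * Example (illustrative address): handle_fault() below forwards a
 * write-protection fault on guest 2 address 0x12345000 as
 *
 *	inject_fault(vcpu, PGM_PROTECTION, 0x12345000, 1);
 *
 * which injects the program interrupt into guest 2 and returns 1, so
 * the caller hands control back to guest 2.
 */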

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr, NULL);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr, NULL);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}

/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;

	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}

/*
 * Get a register for a nested guest.
 * @vcpu: the vcpu of the guest
 * @vsie_page: the vsie_page for the nested guest
 * @reg: the register number, the upper 4 bits are ignored.
 * returns: the value of the register.
 */
static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
{
	/* no need to validate the parameter and/or perform error handling */
	reg &= 0xf;
	switch (reg) {
	case 15:
		return vsie_page->scb_s.gg15;
	case 14:
		return vsie_page->scb_s.gg14;
	default:
		return vcpu->run->s.regs.gprs[reg];
	}
}
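
/*
 * Example: vsie_handle_mvpg() below uses this helper to fetch the two
 * MVPG operand registers encoded in the instruction parameters:
 *
 *	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
 *	src  = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
 *
 * relying on the masking of the upper bits above, and on gg14/gg15 being
 * held in the shadow scb rather than in vcpu->run.
 */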

static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	unsigned long pei_dest, pei_src, src, dest, mask, prefix;
	u64 *pei_block = &vsie_page->scb_o->mcic;
	int edat, rc_dest, rc_src;
	union ctlreg0 cr0;

	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;

	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;

	rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
	rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
	/*
	 * Either everything went well, or something non-critical went wrong
	 * e.g. because of a race. In either case, simply retry.
	 */
	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
		retry_vsie_icpt(vsie_page);
		return -EAGAIN;
	}
	/* Something more serious went wrong, propagate the error */
	if (rc_dest < 0)
		return rc_dest;
	if (rc_src < 0)
		return rc_src;

	/* The only possible suppressing exception: just deliver it */
	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
		clear_vsie_icpt(vsie_page);
		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
		WARN_ON_ONCE(rc_dest);
		return 1;
	}

	/*
	 * Forward the PEI intercept to the guest if it was a page fault, or
	 * also for segment and region table faults if EDAT applies.
	 */
	if (edat) {
		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
	} else {
		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
	}
	if (!rc_dest && !rc_src) {
		pei_block[0] = pei_dest;
		pei_block[1] = pei_src;
		return 1;
	}

	retry_vsie_icpt(vsie_page);

	/*
	 * The host has edat, and the guest does not, or it was an ASCE type
	 * exception. The host needs to inject the appropriate DAT interrupts
	 * into the guest.
	 */
	if (rc_dest)
		return inject_fault(vcpu, rc_dest, dest, 1);
	return inject_fault(vcpu, rc_src, src, 0);
}

/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	__releases(vcpu->kvm->srcu)
	__acquires(vcpu->kvm->srcu)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int guest_bp_isolation;
	int rc = 0;

	handle_last_fault(vcpu, vsie_page);

	kvm_vcpu_srcu_read_unlock(vcpu);

	/* save current guest state of bp isolation override */
	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * The guest is running with BPBC, so we have to force it on for our
	 * nested guest. This is done by enabling BPBC globally, so the BPBC
	 * control in the SCB (which the nested guest can modify) is simply
	 * ignored.
	 */
	if (test_kvm_facility(vcpu->kvm, 82) &&
	    vcpu->arch.sie_block->fpf & FPF_BPBC)
		set_thread_flag(TIF_ISOLATE_BP_GUEST);

	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	/*
	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
	 * and VCPU requests also hinder the vSIE from running and lead
	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
	 * also kick the vSIE.
	 */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
	barrier();
	if (test_cpu_flag(CIF_FPU))
		load_fpu_regs();
	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();

	/* restore guest state for bp isolation override */
	if (!guest_bp_isolation)
		clear_thread_flag(TIF_ISOLATE_BP_GUEST);

	kvm_vcpu_srcu_read_lock(vcpu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	case ICPT_PARTEXEC:
		if (scb_s->ipa == 0xb254)
			rc = vsie_handle_mvpg(vcpu, vsie_page);
		break;
	}
	return rc;
}

static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}

/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;

	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}
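
/*
 * Note on the epoch adjustment above: the addition is modulo 2^64, so if
 * the adjusted g3 epoch ends up smaller than the g2 epoch, the addition
 * wrapped and the carry has to be propagated into the epoch extension
 * (epdx) when the multiple-epoch facility is in use.
 */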

/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
		    kvm_s390_vcpu_sie_inhibited(vcpu))
			break;
		cond_resched();
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
		rc = 1;
	}
	return rc;
}

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

	rcu_read_lock();
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}
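
/*
 * Note (illustrative): the radix tree is keyed by addr >> 9 because the
 * scb provided by guest 2 is 512 byte aligned (checked in
 * kvm_s390_handle_vsie()), so the low 9 bits carry no information,
 * e.g. scb address 0x7fffee00 -> key 0x3ffff7.
 */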

/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
	    kvm_s390_vcpu_sie_inhibited(vcpu))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL_ACCOUNT);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}