// SPDX-License-Identifier: GPL-2.0
/*
 * guest access functions
 *
 * Copyright IBM Corp. 2014
 *
 */

#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
#include <linux/pgtable.h>
#include <linux/bitfield.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>
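
/*
 * The unions and structs below mirror the architected layouts of the
 * address-space-control element and of the region, segment and page
 * table entries, so a table word read from guest memory can simply be
 * assigned to one of them and decoded via its bit fields.
 */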

union asce {
	unsigned long val;
	struct {
		unsigned long origin : 52; /* Region- or Segment-Table Origin */
		unsigned long : 2;
		unsigned long g : 1; /* Subspace Group Control */
		unsigned long p : 1; /* Private Space Control */
		unsigned long s : 1; /* Storage-Alteration-Event Control */
		unsigned long x : 1; /* Space-Switch-Event Control */
		unsigned long r : 1; /* Real-Space Control */
		unsigned long : 1;
		unsigned long dt : 2; /* Designation-Type Control */
		unsigned long tl : 2; /* Region- or Segment-Table Length */
	};
};

enum {
	ASCE_TYPE_SEGMENT = 0,
	ASCE_TYPE_REGION3 = 1,
	ASCE_TYPE_REGION2 = 2,
	ASCE_TYPE_REGION1 = 3
};

union region1_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long : 2;
		unsigned long p : 1; /* DAT-Protection Bit */
		unsigned long : 1;
		unsigned long tf : 2; /* Region-Second-Table Offset */
		unsigned long i : 1; /* Region-Invalid Bit */
		unsigned long : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Second-Table Length */
	};
};

union region2_table_entry {
	unsigned long val;
	struct {
		unsigned long rto : 52; /* Region-Table Origin */
		unsigned long : 2;
		unsigned long p : 1; /* DAT-Protection Bit */
		unsigned long : 1;
		unsigned long tf : 2; /* Region-Third-Table Offset */
		unsigned long i : 1; /* Region-Invalid Bit */
		unsigned long : 1;
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long tl : 2; /* Region-Third-Table Length */
	};
};

struct region3_table_entry_fc0 {
	unsigned long sto : 52; /* Segment-Table Origin */
	unsigned long : 1;
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1; /* DAT-Protection Bit */
	unsigned long : 1;
	unsigned long tf : 2; /* Segment-Table Offset */
	unsigned long i : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long tl : 2; /* Segment-Table Length */
};

struct region3_table_entry_fc1 {
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long : 14;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1; /* DAT-Protection Bit */
	unsigned long iep : 1; /* Instruction-Execution-Protection */
	unsigned long : 2;
	unsigned long i : 1; /* Region-Invalid Bit */
	unsigned long cr : 1; /* Common-Region Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long : 2;
};

union region3_table_entry {
	unsigned long val;
	struct region3_table_entry_fc0 fc0;
	struct region3_table_entry_fc1 fc1;
	struct {
		unsigned long : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long : 4;
		unsigned long i : 1; /* Region-Invalid Bit */
		unsigned long cr : 1; /* Common-Region Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long : 2;
	};
};

struct segment_entry_fc0 {
	unsigned long pto : 53; /* Page-Table Origin */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1; /* DAT-Protection Bit */
	unsigned long : 3;
	unsigned long i : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long : 2;
};

struct segment_entry_fc1 {
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long : 3;
	unsigned long av : 1; /* ACCF-Validity Control */
	unsigned long acc : 4; /* Access-Control Bits */
	unsigned long f : 1; /* Fetch-Protection Bit */
	unsigned long fc : 1; /* Format-Control */
	unsigned long p : 1; /* DAT-Protection Bit */
	unsigned long iep : 1; /* Instruction-Execution-Protection */
	unsigned long : 2;
	unsigned long i : 1; /* Segment-Invalid Bit */
	unsigned long cs : 1; /* Common-Segment Bit */
	unsigned long tt : 2; /* Table-Type Bits */
	unsigned long : 2;
};

union segment_table_entry {
	unsigned long val;
	struct segment_entry_fc0 fc0;
	struct segment_entry_fc1 fc1;
	struct {
		unsigned long : 53;
		unsigned long fc : 1; /* Format-Control */
		unsigned long : 4;
		unsigned long i : 1; /* Segment-Invalid Bit */
		unsigned long cs : 1; /* Common-Segment Bit */
		unsigned long tt : 2; /* Table-Type Bits */
		unsigned long : 2;
	};
};

enum {
	TABLE_TYPE_SEGMENT = 0,
	TABLE_TYPE_REGION3 = 1,
	TABLE_TYPE_REGION2 = 2,
	TABLE_TYPE_REGION1 = 3
};

union page_table_entry {
	unsigned long val;
	struct {
		unsigned long pfra : 52; /* Page-Frame Real Address */
		unsigned long z : 1; /* Zero Bit */
		unsigned long i : 1; /* Page-Invalid Bit */
		unsigned long p : 1; /* DAT-Protection Bit */
		unsigned long iep : 1; /* Instruction-Execution-Protection */
		unsigned long : 8;
	};
};

/*
 * vaddress union in order to easily decode a virtual address into its
 * region first index, region second index etc. parts.
 */
union vaddress {
	unsigned long addr;
	struct {
		unsigned long rfx : 11;
		unsigned long rsx : 11;
		unsigned long rtx : 11;
		unsigned long sx : 11;
		unsigned long px : 8;
		unsigned long bx : 12;
	};
	struct {
		unsigned long rfx01 : 2;
		unsigned long : 9;
		unsigned long rsx01 : 2;
		unsigned long : 9;
		unsigned long rtx01 : 2;
		unsigned long : 9;
		unsigned long sx01 : 2;
		unsigned long : 29;
	};
};

/*
 * raddress union which will contain the result (real or absolute address)
 * after a page table walk. The rfaa, sfaa and pfra members are used to
 * simply assign them the value of a region, segment or page table entry.
 */
union raddress {
	unsigned long addr;
	unsigned long rfaa : 33; /* Region-Frame Absolute Address */
	unsigned long sfaa : 44; /* Segment-Frame Absolute Address */
	unsigned long pfra : 52; /* Page-Frame Real Address */
};

union alet {
	u32 val;
	struct {
		u32 reserved : 7;
		u32 p : 1;
		u32 alesn : 8;
		u32 alen : 16;
	};
};

union ald {
	u32 val;
	struct {
		u32 : 1;
		u32 alo : 24;
		u32 all : 7;
	};
};

struct ale {
	unsigned long i : 1; /* ALEN-Invalid Bit */
	unsigned long : 5;
	unsigned long fo : 1; /* Fetch-Only Bit */
	unsigned long p : 1; /* Private Bit */
	unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
	unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
	unsigned long : 32;
	unsigned long : 1;
	unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
	unsigned long : 6;
	unsigned long astesn : 32; /* ASTE Sequence Number */
};

struct aste {
	unsigned long i : 1; /* ASX-Invalid Bit */
	unsigned long ato : 29; /* Authority-Table Origin */
	unsigned long : 1;
	unsigned long b : 1; /* Base-Space Bit */
	unsigned long ax : 16; /* Authorization Index */
	unsigned long atl : 12; /* Authority-Table Length */
	unsigned long : 2;
	unsigned long ca : 1; /* Controlled-ASN Bit */
	unsigned long ra : 1; /* Reusable-ASN Bit */
	unsigned long asce : 64; /* Address-Space-Control Element */
	unsigned long ald : 32;
	unsigned long astesn : 32;
	/* .. more fields there */
};
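
/*
 * The IPTE lock serializes guest DAT table accesses against the IPTE
 * instruction. With the SIIF facility the lock state lives in the ipte
 * control word of the SCA, which is shared with the SIE; without it a
 * host-side mutex and counter are used instead.
 */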

int ipte_lock_held(struct kvm *kvm)
{
	if (sclp.has_siif) {
		int rc;

		read_lock(&kvm->arch.sca_lock);
		rc = kvm_s390_get_ipte_control(kvm)->kh != 0;
		read_unlock(&kvm->arch.sca_lock);
		return rc;
	}
	return kvm->arch.ipte_lock_count != 0;
}

static void ipte_lock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count++;
	if (kvm->arch.ipte_lock_count > 1)
		goto out;
retry:
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.k) {
			read_unlock(&kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}

static void ipte_unlock_simple(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	mutex_lock(&kvm->arch.ipte_mutex);
	kvm->arch.ipte_lock_count--;
	if (kvm->arch.ipte_lock_count)
		goto out;
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
	wake_up(&kvm->arch.ipte_wq);
out:
	mutex_unlock(&kvm->arch.ipte_mutex);
}
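
/*
 * With SIIF, the kh field of the ipte control word counts the lock
 * holders and k stays set while at least one holder exists, so the SIE
 * can honour the lock without intercepting to the host.
 */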

static void ipte_lock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

retry:
	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		if (old.kg) {
			read_unlock(&kvm->arch.sca_lock);
			cond_resched();
			goto retry;
		}
		new = old;
		new.k = 1;
		new.kh++;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
}

static void ipte_unlock_siif(struct kvm *kvm)
{
	union ipte_control old, new, *ic;

	read_lock(&kvm->arch.sca_lock);
	ic = kvm_s390_get_ipte_control(kvm);
	do {
		old = READ_ONCE(*ic);
		new = old;
		new.kh--;
		if (!new.kh)
			new.k = 0;
	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
	read_unlock(&kvm->arch.sca_lock);
	if (!new.kh)
		wake_up(&kvm->arch.ipte_wq);
}

void ipte_lock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_lock_siif(kvm);
	else
		ipte_lock_simple(kvm);
}

void ipte_unlock(struct kvm *kvm)
{
	if (sclp.has_siif)
		ipte_unlock_siif(kvm);
	else
		ipte_unlock_simple(kvm);
}
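
/*
 * Access-register translation: resolve the ALET in access register @ar
 * to an ASCE by walking the access list (ALE) and the ASN second table
 * entry (ASTE), including the extended-authorization check against the
 * authority table for private entries.
 */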

static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
			  enum gacc_mode mode)
{
	union alet alet;
	struct ale ale;
	struct aste aste;
	unsigned long ald_addr, authority_table_addr;
	union ald ald;
	int eax, rc;
	u8 authority_table;

	if (ar >= NUM_ACRS)
		return -EINVAL;

	save_access_regs(vcpu->run->s.regs.acrs);
	alet.val = vcpu->run->s.regs.acrs[ar];

	if (ar == 0 || alet.val == 0) {
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	} else if (alet.val == 1) {
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	}

	if (alet.reserved)
		return PGM_ALET_SPECIFICATION;

	if (alet.p)
		ald_addr = vcpu->arch.sie_block->gcr[5];
	else
		ald_addr = vcpu->arch.sie_block->gcr[2];
	ald_addr &= 0x7fffffc0;

	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
	if (rc)
		return rc;

	if (alet.alen / 8 > ald.all)
		return PGM_ALEN_TRANSLATION;

	if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
		return PGM_ADDRESSING;

	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
			     sizeof(struct ale));
	if (rc)
		return rc;

	if (ale.i == 1)
		return PGM_ALEN_TRANSLATION;
	if (ale.alesn != alet.alesn)
		return PGM_ALE_SEQUENCE;

	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
	if (rc)
		return rc;

	if (aste.i)
		return PGM_ASTE_VALIDITY;
	if (aste.astesn != ale.astesn)
		return PGM_ASTE_SEQUENCE;

	if (ale.p == 1) {
		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
		if (ale.aleax != eax) {
			if (eax / 16 > aste.atl)
				return PGM_EXTENDED_AUTHORITY;

			authority_table_addr = aste.ato * 4 + eax / 4;

			rc = read_guest_real(vcpu, authority_table_addr,
					     &authority_table,
					     sizeof(u8));
			if (rc)
				return rc;

			if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
				return PGM_EXTENDED_AUTHORITY;
		}
	}

	if (ale.fo == 1 && mode == GACC_STORE)
		return PGM_PROTECTION;

	asce->val = aste.asce;
	return 0;
}

struct trans_exc_code_bits {
	unsigned long addr : 52; /* Translation-exception Address */
	unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
	unsigned long : 2;
	unsigned long b56 : 1;
	unsigned long : 3;
	unsigned long b60 : 1;
	unsigned long b61 : 1;
	unsigned long as : 2; /* ASCE Identifier */
};

enum {
	FSI_UNKNOWN = 0, /* Unknown whether fetch or store */
	FSI_STORE   = 1, /* Exception was due to store operation */
	FSI_FETCH   = 2  /* Exception was due to fetch operation */
};

enum prot_type {
	PROT_TYPE_LA   = 0,
	PROT_TYPE_KEYC = 1,
	PROT_TYPE_ALC  = 2,
	PROT_TYPE_DAT  = 3,
	PROT_TYPE_IEP  = 4,
	/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
	PROT_NONE,
};
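
/*
 * Fill vcpu->arch.pgm with program interruption code @code and a matching
 * translation-exception identification, so that a subsequent
 * kvm_s390_inject_prog_vcpu() injects the correct access exception.
 */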

static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
			    enum gacc_mode mode, enum prot_type prot, bool terminate)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	struct trans_exc_code_bits *tec;

	memset(pgm, 0, sizeof(*pgm));
	pgm->code = code;
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;

	switch (code) {
	case PGM_PROTECTION:
		switch (prot) {
		case PROT_NONE:
			/* We should never get here, acts like termination */
			WARN_ON_ONCE(1);
			break;
		case PROT_TYPE_IEP:
			tec->b61 = 1;
			fallthrough;
		case PROT_TYPE_LA:
			tec->b56 = 1;
			break;
		case PROT_TYPE_KEYC:
			tec->b60 = 1;
			break;
		case PROT_TYPE_ALC:
			tec->b60 = 1;
			fallthrough;
		case PROT_TYPE_DAT:
			tec->b61 = 1;
			break;
		}
		if (terminate) {
			tec->b56 = 0;
			tec->b60 = 0;
			tec->b61 = 0;
		}
		fallthrough;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		/*
		 * op_access_id only applies to MOVE_PAGE -> set bit 61
		 * exc_access_id has to be set to 0 for some instructions. Both
		 * cases have to be handled by the caller.
		 */
		tec->addr = gva >> PAGE_SHIFT;
		tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
		fallthrough;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_ASTE_SEQUENCE:
	case PGM_EXTENDED_AUTHORITY:
		/*
		 * We can always store exc_access_id, as it is
		 * undefined for non-ar cases. It is undefined for
		 * most DAT protection exceptions.
		 */
		pgm->exc_access_id = ar;
		break;
	}
	return code;
}

static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
		     enum gacc_mode mode, enum prot_type prot)
{
	return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false);
}
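
/*
 * Pick the ASCE that governs the access: real mode if DAT is off,
 * otherwise CR1/CR7/CR13 according to the address-space control in the
 * PSW, or the result of access-register translation.
 */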

static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
			 unsigned long ga, u8 ar, enum gacc_mode mode)
{
	int rc;
	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);

	if (!psw.dat) {
		asce->val = 0;
		asce->r = 1;
		return 0;
	}

	if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME))
		psw.as = PSW_BITS_AS_PRIMARY;

	switch (psw.as) {
	case PSW_BITS_AS_PRIMARY:
		asce->val = vcpu->arch.sie_block->gcr[1];
		return 0;
	case PSW_BITS_AS_SECONDARY:
		asce->val = vcpu->arch.sie_block->gcr[7];
		return 0;
	case PSW_BITS_AS_HOME:
		asce->val = vcpu->arch.sie_block->gcr[13];
		return 0;
	case PSW_BITS_AS_ACCREG:
		rc = ar_translation(vcpu, asce, ar, mode);
		if (rc > 0)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
		return rc;
	}
	return 0;
}

static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}

/**
 * guest_translate - translate a guest virtual into a guest absolute address
 * @vcpu: virtual cpu
 * @gva: guest virtual address
 * @gpa: points to where guest physical (absolute) address should be stored
 * @asce: effective asce
 * @mode: indicates the access mode to be used
 * @prot: returns the type for protection exceptions
 *
 * Translate a guest virtual address into a guest absolute address by means
 * of dynamic address translation as specified by the architecture.
 * If the resulting absolute address is not available in the configuration
 * an addressing exception is indicated and @gpa will not be changed.
 *
 * Returns: - zero on success; @gpa contains the resulting absolute address
 *	    - a negative value if guest access failed due to e.g. broken
 *	      guest mapping
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code as defined
 *	      by the architecture
 */
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
				     unsigned long *gpa, const union asce asce,
				     enum gacc_mode mode, enum prot_type *prot)
{
	union vaddress vaddr = {.addr = gva};
	union raddress raddr = {.addr = gva};
	union page_table_entry pte;
	int dat_protection = 0;
	int iep_protection = 0;
	union ctlreg0 ctlreg0;
	unsigned long ptr;
	int edat1, edat2, iep;

	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
	if (asce.r)
		goto real_address;
	ptr = asce.origin * PAGE_SIZE;
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl)
			return PGM_REGION_FIRST_TRANS;
		ptr += vaddr.rfx * 8;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		ptr += vaddr.rsx * 8;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		ptr += vaddr.rtx * 8;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		ptr += vaddr.sx * 8;
		break;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rfte.val))
			return -EFAULT;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (edat1)
			dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rste.val))
			return -EFAULT;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (edat1)
			dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &rtte.val))
			return -EFAULT;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && edat2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && edat2) {
			dat_protection |= rtte.fc1.p;
			iep_protection = rtte.fc1.iep;
			raddr.rfaa = rtte.fc1.rfaa;
			goto absolute_address;
		}
		if (vaddr.sx01 < rtte.fc0.tf)
			return PGM_SEGMENT_TRANSLATION;
		if (vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (edat1)
			dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (kvm_is_error_gpa(vcpu->kvm, ptr))
			return PGM_ADDRESSING;
		if (deref_table(vcpu->kvm, ptr, &ste.val))
			return -EFAULT;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		if (ste.fc && edat1) {
			dat_protection |= ste.fc1.p;
			iep_protection = ste.fc1.iep;
			raddr.sfaa = ste.fc1.sfaa;
			goto absolute_address;
		}
		dat_protection |= ste.fc0.p;
		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
	}
	}
	if (kvm_is_error_gpa(vcpu->kvm, ptr))
		return PGM_ADDRESSING;
	if (deref_table(vcpu->kvm, ptr, &pte.val))
		return -EFAULT;
	if (pte.i)
		return PGM_PAGE_TRANSLATION;
	if (pte.z)
		return PGM_TRANSLATION_SPEC;
	dat_protection |= pte.p;
	iep_protection = pte.iep;
	raddr.pfra = pte.pfra;
real_address:
	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
	if (mode == GACC_STORE && dat_protection) {
		*prot = PROT_TYPE_DAT;
		return PGM_PROTECTION;
	}
	if (mode == GACC_IFETCH && iep_protection && iep) {
		*prot = PROT_TYPE_IEP;
		return PGM_PROTECTION;
	}
	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
		return PGM_ADDRESSING;
	*gpa = raddr.addr;
	return 0;
}

static inline int is_low_address(unsigned long ga)
{
	/* Check for address ranges 0..511 and 4096..4607 */
	return (ga & ~0x11fful) == 0;
}

static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
					  const union asce asce)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	if (!ctlreg0.lap)
		return 0;
	if (psw_bits(*psw).dat && asce.p)
		return 0;
	return 1;
}
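
/*
 * Check key-controlled protection for one page: the access is allowed if
 * the access key matches the access-control bits of the storage key, or
 * if it is a fetch from a page that is not fetch protected.
 */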

static int vm_check_access_key(struct kvm *kvm, u8 access_key,
			       enum gacc_mode mode, gpa_t gpa)
{
	u8 storage_key, access_control;
	bool fetch_protected;
	unsigned long hva;
	int r;

	if (access_key == 0)
		return 0;

	hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;

	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	if (access_control == access_key)
		return 0;
	fetch_protected = storage_key & _PAGE_FP_BIT;
	if ((mode == GACC_FETCH || mode == GACC_IFETCH) && !fetch_protected)
		return 0;
	return PGM_PROTECTION;
}

static bool fetch_prot_override_applicable(struct kvm_vcpu *vcpu, enum gacc_mode mode,
					   union asce asce)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long override;

	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* check if fetch protection override enabled */
		override = vcpu->arch.sie_block->gcr[0];
		override &= CR0_FETCH_PROTECTION_OVERRIDE;
		/* not applicable if subject to DAT && private space */
		override = override && !(psw_bits(*psw).dat && asce.p);
		return override;
	}
	return false;
}

static bool fetch_prot_override_applies(unsigned long ga, unsigned int len)
{
	return ga < 2048 && ga + len <= 2048;
}

static bool storage_prot_override_applicable(struct kvm_vcpu *vcpu)
{
	/* check if storage protection override enabled */
	return vcpu->arch.sie_block->gcr[0] & CR0_STORAGE_PROTECTION_OVERRIDE;
}

static bool storage_prot_override_applies(u8 access_control)
{
	/* matches special storage protection override key (9) -> allow */
	return access_control == PAGE_SPO_ACC;
}

static int vcpu_check_access_key(struct kvm_vcpu *vcpu, u8 access_key,
				 enum gacc_mode mode, union asce asce, gpa_t gpa,
				 unsigned long ga, unsigned int len)
{
	u8 storage_key, access_control;
	unsigned long hva;
	int r;

	/* access key 0 matches any storage key -> allow */
	if (access_key == 0)
		return 0;
	/*
	 * caller needs to ensure that gfn is accessible, so we can
	 * assume that this cannot fail
	 */
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
	mmap_read_lock(current->mm);
	r = get_guest_storage_key(current->mm, hva, &storage_key);
	mmap_read_unlock(current->mm);
	if (r)
		return r;
	access_control = FIELD_GET(_PAGE_ACC_BITS, storage_key);
	/* access key matches storage key -> allow */
	if (access_control == access_key)
		return 0;
	if (mode == GACC_FETCH || mode == GACC_IFETCH) {
		/* it is a fetch and fetch protection is off -> allow */
		if (!(storage_key & _PAGE_FP_BIT))
			return 0;
		if (fetch_prot_override_applicable(vcpu, mode, asce) &&
		    fetch_prot_override_applies(ga, len))
			return 0;
	}
	if (storage_prot_override_applicable(vcpu) &&
	    storage_prot_override_applies(access_control))
		return 0;
	return PGM_PROTECTION;
}

/**
 * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
 * covering a logical range
 * @vcpu: virtual cpu
 * @ga: guest address, start of range
 * @ar: access register
 * @gpas: output argument, may be NULL
 * @len: length of range in bytes
 * @asce: address-space-control element to use for translation
 * @mode: access mode
 * @access_key: access key to match the range's storage keys against
 *
 * Translate a logical range to a series of guest absolute addresses,
 * such that the concatenation of page fragments starting at each gpa make up
 * the whole range.
 * The translation is performed as if done by the cpu for the given @asce, @ar,
 * @mode and state of the @vcpu.
 * If the translation causes an exception, its program interruption code is
 * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
 * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
 * a correct exception into the guest.
 * The resulting gpas are stored into @gpas, unless it is NULL.
 *
 * Note: All fragments except the first one start at the beginning of a page.
 * When deriving the boundaries of a fragment from a gpa, all but the last
 * fragment end at the end of the page.
 *
 * Return:
 * * 0		- success
 * * <0		- translation could not be performed, for example if guest
 *		  memory could not be accessed
 * * >0		- an access exception occurred. In this case the returned value
 *		  is the program interruption code and the contents of pgm may
 *		  be used to inject an exception into the guest.
 */
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			       unsigned long *gpas, unsigned long len,
			       const union asce asce, enum gacc_mode mode,
			       u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned int offset = offset_in_page(ga);
	unsigned int fragment_len;
	int lap_enabled, rc = 0;
	enum prot_type prot;
	unsigned long gpa;

	lap_enabled = low_address_protection_enabled(vcpu, asce);
	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		ga = kvm_s390_logical_to_effective(vcpu, ga);
		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
					 PROT_TYPE_LA);
		if (psw_bits(*psw).dat) {
			rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
			if (rc < 0)
				return rc;
		} else {
			gpa = kvm_s390_real_to_abs(vcpu, ga);
			if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
				rc = PGM_ADDRESSING;
				prot = PROT_NONE;
			}
		}
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, prot);
		rc = vcpu_check_access_key(vcpu, access_key, mode, asce, gpa, ga,
					   fragment_len);
		if (rc)
			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_KEYC);
		if (gpas)
			*gpas++ = gpa;
		offset = 0;
		ga += fragment_len;
		len -= fragment_len;
	}
	return 0;
}
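
/*
 * Access a single page of guest memory through the memslot interface,
 * without any storage key checking.
 */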

static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			     void *data, unsigned int len)
{
	const unsigned int offset = offset_in_page(gpa);
	const gfn_t gfn = gpa_to_gfn(gpa);
	int rc;

	if (mode == GACC_STORE)
		rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
	else
		rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
	return rc;
}
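
/*
 * Like access_guest_page(), but the copy is performed with the given
 * access key through the key-checked uaccess copy routines, so
 * key-controlled protection is enforced by the hardware during the copy.
 */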

static int
access_guest_page_with_key(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
			   void *data, unsigned int len, u8 access_key)
{
	struct kvm_memory_slot *slot;
	bool writable;
	gfn_t gfn;
	hva_t hva;
	int rc;

	gfn = gpa >> PAGE_SHIFT;
	slot = gfn_to_memslot(kvm, gfn);
	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);

	if (kvm_is_error_hva(hva))
		return PGM_ADDRESSING;
	/*
	 * Check if it's a read-only memslot, even though that cannot occur
	 * since those are unsupported.
	 * Don't try to actually handle that case.
	 */
	if (!writable && mode == GACC_STORE)
		return -EOPNOTSUPP;
	hva += offset_in_page(gpa);
	if (mode == GACC_STORE)
		rc = copy_to_user_key((void __user *)hva, data, len, access_key);
	else
		rc = copy_from_user_key(data, (void __user *)hva, len, access_key);
	if (rc)
		return PGM_PROTECTION;
	if (mode == GACC_STORE)
		mark_page_dirty_in_slot(kvm, slot, gfn);
	return 0;
}
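
/*
 * Access guest absolute storage of arbitrary length, splitting the access
 * into per-page fragments so each fragment can be key checked and copied
 * on its own.
 */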

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key)
{
	int offset = offset_in_page(gpa);
	int fragment_len;
	int rc;

	while (min(PAGE_SIZE - offset, len) > 0) {
		fragment_len = min(PAGE_SIZE - offset, len);
		rc = access_guest_page_with_key(kvm, mode, gpa, data, fragment_len, access_key);
		if (rc)
			return rc;
		offset = 0;
		len -= fragment_len;
		data += fragment_len;
		gpa += fragment_len;
	}
	return 0;
}
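
/*
 * Access guest memory at logical address @ga: translate the whole range
 * page by page first, then copy each fragment, honouring the fetch and
 * storage protection overrides where they apply.
 */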

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	unsigned long nr_pages, idx;
	unsigned long gpa_array[2];
	unsigned int fragment_len;
	unsigned long *gpas;
	enum prot_type prot;
	int need_ipte_lock;
	union asce asce;
	bool try_storage_prot_override;
	bool try_fetch_prot_override;
	int rc;

	if (!len)
		return 0;
	ga = kvm_s390_logical_to_effective(vcpu, ga);
	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
	if (rc)
		return rc;
	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
	gpas = gpa_array;
	if (nr_pages > ARRAY_SIZE(gpa_array))
		gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
	if (!gpas)
		return -ENOMEM;
	try_fetch_prot_override = fetch_prot_override_applicable(vcpu, mode, asce);
	try_storage_prot_override = storage_prot_override_applicable(vcpu);
	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
	if (need_ipte_lock)
		ipte_lock(vcpu->kvm);
	/*
	 * Since we do the access further down ultimately via a move instruction
	 * that does key checking and returns an error in case of a protection
	 * violation, we don't need to do the check during address translation.
	 * Skip it by passing access key 0, which matches any storage key,
	 * obviating the need for any further checks. As a result the check is
	 * handled entirely in hardware on access, we only need to take care to
	 * forego key protection checking if fetch protection override applies or
	 * retry with the special key 9 in case of storage protection override.
	 */
	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode, 0);
	if (rc)
		goto out_unlock;
	for (idx = 0; idx < nr_pages; idx++) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
		if (try_fetch_prot_override && fetch_prot_override_applies(ga, fragment_len)) {
			rc = access_guest_page(vcpu->kvm, mode, gpas[idx],
					       data, fragment_len);
		} else {
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, access_key);
		}
		if (rc == PGM_PROTECTION && try_storage_prot_override)
			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
							data, fragment_len, PAGE_SPO_ACC);
		if (rc)
			break;
		len -= fragment_len;
		data += fragment_len;
		ga = kvm_s390_logical_to_effective(vcpu, ga + fragment_len);
	}
	if (rc > 0) {
		bool terminate = (mode == GACC_STORE) && (idx > 0);

		if (rc == PGM_PROTECTION)
			prot = PROT_TYPE_KEYC;
		else
			prot = PROT_NONE;
		rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
	}
out_unlock:
	if (need_ipte_lock)
		ipte_unlock(vcpu->kvm);
	if (nr_pages > ARRAY_SIZE(gpa_array))
		vfree(gpas);
	return rc;
}
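
/*
 * Access guest real storage: prefixing is applied per page, but neither
 * DAT nor storage key checking is involved.
 */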

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode)
{
	unsigned int fragment_len;
	unsigned long gpa;
	int rc = 0;

	while (len && !rc) {
		gpa = kvm_s390_real_to_abs(vcpu, gra);
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
		rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
		len -= fragment_len;
		gra += fragment_len;
		data += fragment_len;
	}
	return rc;
}

/**
 * guest_translate_address_with_key - translate guest logical into guest absolute address
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @gpa: Guest physical address
 * @mode: Translation access mode
 * @access_key: access key to match the storage key with
 *
 * Parameter semantics are the same as the ones from guest_translate.
 * The memory contents at the guest address are not changed.
 *
 * Note: The IPTE lock is not taken during this function, so the caller
 * has to take care of this.
 */
int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key)
{
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode,
				   access_key);
}

/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 * @vcpu: virtual cpu
 * @gva: Guest virtual address
 * @ar: Access register
 * @length: Length of test range
 * @mode: Translation access mode
 * @access_key: access key to match the storage keys with
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key)
{
	union asce asce;
	int rc = 0;

	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
	if (rc)
		return rc;
	ipte_lock(vcpu->kvm);
	rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode,
				 access_key);
	ipte_unlock(vcpu->kvm);

	return rc;
}

/**
 * check_gpa_range - test a range of guest physical addresses for accessibility
 * @kvm: virtual machine instance
 * @gpa: guest physical address
 * @length: length of test range
 * @mode: access mode to test, relevant for storage keys
 * @access_key: access key to match the storage keys with
 */
int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key)
{
	unsigned int fragment_len;
	int rc = 0;

	while (length && !rc) {
		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), length);
		rc = vm_check_access_key(kvm, access_key, mode, gpa);
		length -= fragment_len;
		gpa += fragment_len;
	}
	return rc;
}

/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @vcpu: virtual cpu
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and sets
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;
	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
}

/**
 * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: pointer to the beginning of the page table for the given address if
 *	 successful (return value 0), or to the first invalid DAT entry in
 *	 case of exceptions (return value > 0)
 * @dat_protection: referenced memory is write protected
 * @fake: pgt references contiguous guest memory block, not a pgtable
 */
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
				  unsigned long *pgt, int *dat_protection,
				  int *fake)
{
	struct gmap *parent;
	union asce asce;
	union vaddress vaddr;
	unsigned long ptr;
	int rc;

	*fake = 0;
	*dat_protection = 0;
	parent = sg->parent;
	vaddr.addr = saddr;
	asce.val = sg->orig_asce;
	ptr = asce.origin * PAGE_SIZE;
	if (asce.r) {
		*fake = 1;
		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
		if (vaddr.rfx)
			return PGM_ASCE_TYPE;
		if (vaddr.rsx01 > asce.tl)
			return PGM_REGION_SECOND_TRANS;
		break;
	case ASCE_TYPE_REGION3:
		if (vaddr.rfx || vaddr.rsx)
			return PGM_ASCE_TYPE;
		if (vaddr.rtx01 > asce.tl)
			return PGM_REGION_THIRD_TRANS;
		break;
	case ASCE_TYPE_SEGMENT:
		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
			return PGM_ASCE_TYPE;
		if (vaddr.sx01 > asce.tl)
			return PGM_SEGMENT_TRANSLATION;
		break;
	}

	switch (asce.dt) {
	case ASCE_TYPE_REGION1: {
		union region1_table_entry rfte;

		if (*fake) {
			ptr += vaddr.rfx * _REGION1_SIZE;
			rfte.val = ptr;
			goto shadow_r2t;
		}
		*pgt = ptr + vaddr.rfx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
		if (rc)
			return rc;
		if (rfte.i)
			return PGM_REGION_FIRST_TRANS;
		if (rfte.tt != TABLE_TYPE_REGION1)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
			return PGM_REGION_SECOND_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rfte.p;
		ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
		rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_REGION2: {
		union region2_table_entry rste;

		if (*fake) {
			ptr += vaddr.rsx * _REGION2_SIZE;
			rste.val = ptr;
			goto shadow_r3t;
		}
		*pgt = ptr + vaddr.rsx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
		if (rc)
			return rc;
		if (rste.i)
			return PGM_REGION_SECOND_TRANS;
		if (rste.tt != TABLE_TYPE_REGION2)
			return PGM_TRANSLATION_SPEC;
		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
			return PGM_REGION_THIRD_TRANS;
		if (sg->edat_level >= 1)
			*dat_protection |= rste.p;
		ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
		rste.p |= *dat_protection;
		rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_REGION3: {
		union region3_table_entry rtte;

		if (*fake) {
			ptr += vaddr.rtx * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		*pgt = ptr + vaddr.rtx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
		if (rc)
			return rc;
		if (rtte.i)
			return PGM_REGION_THIRD_TRANS;
		if (rtte.tt != TABLE_TYPE_REGION3)
			return PGM_TRANSLATION_SPEC;
		if (rtte.cr && asce.p && sg->edat_level >= 2)
			return PGM_TRANSLATION_SPEC;
		if (rtte.fc && sg->edat_level >= 2) {
			*dat_protection |= rtte.fc0.p;
			*fake = 1;
			ptr = rtte.fc1.rfaa * _REGION3_SIZE;
			rtte.val = ptr;
			goto shadow_sgt;
		}
		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
			return PGM_SEGMENT_TRANSLATION;
		if (sg->edat_level >= 1)
			*dat_protection |= rtte.fc0.p;
		ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
		rtte.fc0.p |= *dat_protection;
		rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
		if (rc)
			return rc;
	}
		fallthrough;
	case ASCE_TYPE_SEGMENT: {
		union segment_table_entry ste;

		if (*fake) {
			ptr += vaddr.sx * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		*pgt = ptr + vaddr.sx * 8;
		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
		if (rc)
			return rc;
		if (ste.i)
			return PGM_SEGMENT_TRANSLATION;
		if (ste.tt != TABLE_TYPE_SEGMENT)
			return PGM_TRANSLATION_SPEC;
		if (ste.cs && asce.p)
			return PGM_TRANSLATION_SPEC;
		*dat_protection |= ste.fc0.p;
		if (ste.fc && sg->edat_level >= 1) {
			*fake = 1;
			ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
			ste.val = ptr;
			goto shadow_pgt;
		}
		ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
		ste.fc0.p |= *dat_protection;
		rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
		if (rc)
			return rc;
	}
	}
	/* Return the parent address of the page table */
	*pgt = ptr;
	return 0;
}

/**
 * kvm_s390_shadow_fault - handle fault on a shadow page table
 * @vcpu: virtual cpu
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @datptr: will contain the address of the faulting DAT table entry, or of
 *	    the valid leaf, plus some flags
 *
 * Returns: - 0 if the shadow fault was successfully resolved
 *	    - > 0 (pgm exception code) on exceptions while faulting
 *	    - -EAGAIN if the caller can retry immediately
 *	    - -EFAULT when accessing invalid guest addresses
 *	    - -ENOMEM if out of memory
 */
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			  unsigned long saddr, unsigned long *datptr)
{
	union vaddress vaddr;
	union page_table_entry pte;
	unsigned long pgt = 0;
	int dat_protection, fake;
	int rc;

	mmap_read_lock(sg->mm);
	/*
	 * We don't want any guest-2 tables to change - so the parent
	 * tables/pointers we read stay valid - unshadowing is however
	 * always possible - only guest_table_lock protects us.
	 */
	ipte_lock(vcpu->kvm);

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
					    &fake);

	vaddr.addr = saddr;
	if (fake) {
		pte.val = pgt + vaddr.px * PAGE_SIZE;
		goto shadow_page;
	}

	switch (rc) {
	case PGM_SEGMENT_TRANSLATION:
	case PGM_REGION_THIRD_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_FIRST_TRANS:
		pgt |= PEI_NOT_PTE;
		break;
	case 0:
		pgt += vaddr.px * 8;
		rc = gmap_read_table(sg->parent, pgt, &pte.val);
	}
	if (datptr)
		*datptr = pgt | dat_protection * PEI_DAT_PROT;
	if (!rc && pte.i)
		rc = PGM_PAGE_TRANSLATION;
	if (!rc && pte.z)
		rc = PGM_TRANSLATION_SPEC;
shadow_page:
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
	ipte_unlock(vcpu->kvm);
	mmap_read_unlock(sg->mm);
	return rc;
}