sthyi.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * store hypervisor information instruction emulation functions.
  4. *
  5. * Copyright IBM Corp. 2016
  6. * Author(s): Janosch Frank <[email protected]>
  7. */
  8. #include <linux/errno.h>
  9. #include <linux/pagemap.h>
  10. #include <linux/vmalloc.h>
  11. #include <linux/syscalls.h>
  12. #include <linux/mutex.h>
  13. #include <asm/asm-offsets.h>
  14. #include <asm/sclp.h>
  15. #include <asm/diag.h>
  16. #include <asm/sysinfo.h>
  17. #include <asm/ebcdic.h>
  18. #include <asm/facility.h>
  19. #include <asm/sthyi.h>
  20. #include "entry.h"
#define DED_WEIGHT 0xffff	/* weight diag204 reports for dedicated cpus */

/*
 * CP and IFL as EBCDIC strings, SP/0x40 determines the end of string
 * as they are justified with spaces.
 */
#define CP  0xc3d7404040404040UL
#define IFL 0xc9c6d34040404040UL

/* Flag bits for hdr_sctn.infhflg1. */
enum hdr_flags {
	HDR_NOT_LPAR   = 0x10,	/* NOTE(review): unused in this file -- meaning assumed from name */
	HDR_STACK_INCM = 0x20,
	HDR_STSI_UNAV  = 0x40,
	HDR_PERF_UNAV  = 0x80,	/* set when global performance data is unavailable */
};

/* Validity bits for mac_sctn.infmval1. */
enum mac_validity {
	MAC_NAME_VLD = 0x20,	/* machine (CPC) name is valid */
	MAC_ID_VLD   = 0x40,	/* type/manufacturer/plant/sequence are valid */
	MAC_CNT_VLD  = 0x80,	/* machine cpu counts are valid */
};

/* Flag bits for par_sctn.infpflg1. */
enum par_flag {
	PAR_MT_EN = 0x80,	/* multithreading enabled (set when diag204 mtid != 0) */
};

/* Validity bits for par_sctn.infpval1. */
enum par_validity {
	PAR_GRP_VLD  = 0x08,	/* LPAR group name and caps are valid */
	PAR_ID_VLD   = 0x10,	/* partition number and name are valid */
	PAR_ABS_VLD  = 0x20,	/* absolute caps are valid */
	PAR_WGHT_VLD = 0x40,	/* weight-based capacities are valid */
	PAR_PCNT_VLD = 0x80,	/* partition cpu counts are valid */
};
/*
 * Header section of the STHYI response buffer.
 *
 * Describes the overall layout: total length and offset/length pairs
 * for the machine and partition sections, plus up to three
 * hypervisor/guest section pairs (left zeroed by this emulation).
 */
struct hdr_sctn {
	u8 infhflg1;	/* header flags, see enum hdr_flags */
	u8 infhflg2; /* reserved */
	u8 infhval1; /* reserved */
	u8 infhval2; /* reserved */
	u8 reserved[3];
	u8 infhygct;	/* NOTE(review): never written here -- presumably hypervisor/guest count; confirm */
	u16 infhtotl;	/* total length of the response */
	u16 infhdln;	/* length of this header section */
	u16 infmoff;	/* offset of the machine section */
	u16 infmlen;	/* length of the machine section */
	u16 infpoff;	/* offset of the partition section */
	u16 infplen;	/* length of the partition section */
	u16 infhoff1;	/* hypervisor/guest section pair 1 (unused here) */
	u16 infhlen1;
	u16 infgoff1;
	u16 infglen1;
	u16 infhoff2;	/* hypervisor/guest section pair 2 (unused here) */
	u16 infhlen2;
	u16 infgoff2;
	u16 infglen2;
	u16 infhoff3;	/* hypervisor/guest section pair 3 (unused here) */
	u16 infhlen3;
	u16 infgoff3;
	u16 infglen3;
	u8 reserved2[4];
} __packed;
/*
 * Machine section: identification data and physical cpu counts of the
 * whole machine. Filled from SCLP, STSI 1.1.1 and diag204 data.
 */
struct mac_sctn {
	u8 infmflg1; /* reserved */
	u8 infmflg2; /* reserved */
	u8 infmval1;	/* validity bits, see enum mac_validity */
	u8 infmval2; /* reserved */
	u16 infmscps;	/* number of shared CPs */
	u16 infmdcps;	/* number of dedicated CPs */
	u16 infmsifl;	/* number of shared IFLs */
	u16 infmdifl;	/* number of dedicated IFLs */
	char infmname[8];	/* CPC name, copied from SCLP */
	char infmtype[4];	/* machine type, from STSI 1.1.1 */
	char infmmanu[16];	/* manufacturer, from STSI 1.1.1 */
	char infmseq[16];	/* sequence code, from STSI 1.1.1 */
	char infmpman[4];	/* plant of manufacture, from STSI 1.1.1 */
	u8 reserved[4];
} __packed;
/*
 * Partition section: identification, cpu counts, caps and weights of
 * the calling LPAR. Filled from STSI 2.2.2 and diag204 data.
 */
struct par_sctn {
	u8 infpflg1;	/* partition flags, see enum par_flag */
	u8 infpflg2; /* reserved */
	u8 infpval1;	/* validity bits, see enum par_validity */
	u8 infpval2; /* reserved */
	u16 infppnum;	/* logical partition number, from STSI 2.2.2 */
	u16 infpscps;	/* shared CPs configured for this LPAR */
	u16 infpdcps;	/* dedicated CPs configured for this LPAR */
	u16 infpsifl;	/* shared IFLs configured for this LPAR */
	u16 infpdifl;	/* dedicated IFLs configured for this LPAR */
	u16 reserved;
	char infppnam[8];	/* partition name, from STSI 2.2.2 */
	u32 infpwbcp;	/* weight-based CP capacity */
	u32 infpabcp;	/* absolute CP capping */
	u32 infpwbif;	/* weight-based IFL capacity */
	u32 infpabif;	/* absolute IFL capping */
	char infplgnm[8];	/* LPAR group name (diag204 hardware_group_name) */
	u32 infplgcp;	/* LPAR group CP cap */
	u32 infplgif;	/* LPAR group IFL cap */
} __packed;
/* Complete STHYI response: header, machine and partition sections. */
struct sthyi_sctns {
	struct hdr_sctn hdr;
	struct mac_sctn mac;
	struct par_sctn par;
} __packed;

/* Per-cpu-type (CP or IFL) data accumulated from diag204 blocks. */
struct cpu_inf {
	u64 lpar_cap;		/* absolute cap of this cpu type */
	u64 lpar_grp_cap;	/* group cap of this cpu type */
	u64 lpar_weight;	/* this LPAR's weight, only set when capped */
	u64 all_weight;		/* sum of the weights of all LPARs */
	int cpu_num_ded;	/* number of dedicated cpus */
	int cpu_num_shd;	/* number of shared cpus */
};

/* CP and IFL accumulation for one LPAR. */
struct lpar_cpu_inf {
	struct cpu_inf cp;
	struct cpu_inf ifl;
};
/*
 * STHYI requires extensive locking in the higher hypervisors
 * and is very computational/memory expensive. Therefore we
 * cache the retrieved data, which stays valid for one second.
 */
#define CACHE_VALID_JIFFIES	HZ

/* One-page cache of the last generated response and its expiry. */
struct sthyi_info {
	void *info;		/* page holding the cached response */
	unsigned long end;	/* jiffies value at which the data expires */
};

/* Serializes cache allocation, refresh and copy-out. */
static DEFINE_MUTEX(sthyi_mutex);
static struct sthyi_info sthyi_cache;
/*
 * Return the EBCDIC cpu-type name for content-type index @ctidx from
 * the diag224 name table, packed into a u64 for comparison against the
 * CP/IFL constants above. The first table entry is skipped (ctidx + 1)
 * -- presumably entry 0 is not a name slot; TODO confirm against the
 * diag224 layout.
 */
static inline u64 cpu_id(u8 ctidx, void *diag224_buf)
{
	return *((u64 *)(diag224_buf + (ctidx + 1) * DIAG204_CPU_NAME_LEN));
}
  145. /*
  146. * Scales the cpu capping from the lpar range to the one expected in
  147. * sthyi data.
  148. *
  149. * diag204 reports a cap in hundredths of processor units.
  150. * z/VM's range for one core is 0 - 0x10000.
  151. */
  152. static u32 scale_cap(u32 in)
  153. {
  154. return (0x10000 * in) / 100;
  155. }
  156. static void fill_hdr(struct sthyi_sctns *sctns)
  157. {
  158. sctns->hdr.infhdln = sizeof(sctns->hdr);
  159. sctns->hdr.infmoff = sizeof(sctns->hdr);
  160. sctns->hdr.infmlen = sizeof(sctns->mac);
  161. sctns->hdr.infplen = sizeof(sctns->par);
  162. sctns->hdr.infpoff = sctns->hdr.infhdln + sctns->hdr.infmlen;
  163. sctns->hdr.infhtotl = sctns->hdr.infpoff + sctns->hdr.infplen;
  164. }
/*
 * Fill the machine section with the CPC name (via SCLP) and, when STSI
 * 1.1.1 succeeds, the machine type/manufacturer/plant/sequence data.
 * Each piece of data that was actually retrieved gets its validity bit
 * set in infmval1.
 */
static void fill_stsi_mac(struct sthyi_sctns *sctns,
			  struct sysinfo_1_1_1 *sysinfo)
{
	sclp_ocf_cpc_name_copy(sctns->mac.infmname);
	/* An all-zero name means SCLP provided no CPC name. */
	if (*(u64 *)sctns->mac.infmname != 0)
		sctns->mac.infmval1 |= MAC_NAME_VLD;

	if (stsi(sysinfo, 1, 1, 1))
		return;

	memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
	memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
	memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
	memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));

	sctns->mac.infmval1 |= MAC_ID_VLD;
}
/*
 * Fill the partition number and name from STSI 2.2.2 and mark them
 * valid. On STSI failure the fields stay zero and the validity bit
 * stays clear.
 */
static void fill_stsi_par(struct sthyi_sctns *sctns,
			  struct sysinfo_2_2_2 *sysinfo)
{
	if (stsi(sysinfo, 2, 2, 2))
		return;

	sctns->par.infppnum = sysinfo->lpar_number;
	memcpy(sctns->par.infppnam, sysinfo->name, sizeof(sctns->par.infppnam));

	sctns->par.infpval1 |= PAR_ID_VLD;
}
  188. static void fill_stsi(struct sthyi_sctns *sctns)
  189. {
  190. void *sysinfo;
  191. /* Errors are handled through the validity bits in the response. */
  192. sysinfo = (void *)__get_free_page(GFP_KERNEL);
  193. if (!sysinfo)
  194. return;
  195. fill_stsi_mac(sctns, sysinfo);
  196. fill_stsi_par(sctns, sysinfo);
  197. free_pages((unsigned long)sysinfo, 0);
  198. }
/*
 * Count the physical cpus of the machine from the diag204 physical
 * block and sort them into the shared/dedicated CP and IFL counters of
 * the machine section. A weight of DED_WEIGHT marks a dedicated cpu;
 * cpu types other than CP and IFL are ignored.
 */
static void fill_diag_mac(struct sthyi_sctns *sctns,
			  struct diag204_x_phys_block *block,
			  void *diag224_buf)
{
	int i;

	for (i = 0; i < block->hdr.cpus; i++) {
		switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
		case CP:
			if (block->cpus[i].weight == DED_WEIGHT)
				sctns->mac.infmdcps++;
			else
				sctns->mac.infmscps++;
			break;
		case IFL:
			if (block->cpus[i].weight == DED_WEIGHT)
				sctns->mac.infmdifl++;
			else
				sctns->mac.infmsifl++;
			break;
		}
	}
	sctns->mac.infmval1 |= MAC_CNT_VLD;
}
/*
 * Accumulate the cpu data of one diag204 partition block into
 * @part_inf. When @this_lpar is set (the block belongs to the calling
 * LPAR), also record its caps, capping state and cpu counts.
 *
 * Returns a pointer to the next partition block.
 */
static struct diag204_x_part_block *lpar_cpu_inf(struct lpar_cpu_inf *part_inf,
						 bool this_lpar,
						 void *diag224_buf,
						 struct diag204_x_part_block *block)
{
	int i, capped = 0, weight_cp = 0, weight_ifl = 0;
	struct cpu_inf *cpu_inf;

	for (i = 0; i < block->hdr.rcpus; i++) {
		/* Only online cpus contribute. */
		if (!(block->cpus[i].cflag & DIAG204_CPU_ONLINE))
			continue;

		switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
		case CP:
			cpu_inf = &part_inf->cp;
			/*
			 * Dedicated cpus (cur_weight == DED_WEIGHT) carry no
			 * shared weight. NOTE(review): ORing assumes all shared
			 * cpus of a type report the same weight -- confirm.
			 */
			if (block->cpus[i].cur_weight < DED_WEIGHT)
				weight_cp |= block->cpus[i].cur_weight;
			break;
		case IFL:
			cpu_inf = &part_inf->ifl;
			if (block->cpus[i].cur_weight < DED_WEIGHT)
				weight_ifl |= block->cpus[i].cur_weight;
			break;
		default:
			continue;
		}

		/* Caps and cpu counts are only collected for the caller. */
		if (!this_lpar)
			continue;

		capped |= block->cpus[i].cflag & DIAG204_CPU_CAPPED;
		cpu_inf->lpar_cap |= block->cpus[i].cpu_type_cap;
		cpu_inf->lpar_grp_cap |= block->cpus[i].group_cpu_type_cap;

		if (block->cpus[i].weight == DED_WEIGHT)
			cpu_inf->cpu_num_ded += 1;
		else
			cpu_inf->cpu_num_shd += 1;
	}

	/* The caller's own weight only matters if its cpus are capped. */
	if (this_lpar && capped) {
		part_inf->cp.lpar_weight = weight_cp;
		part_inf->ifl.lpar_weight = weight_ifl;
	}

	/* Every partition contributes to the machine-wide total weight. */
	part_inf->cp.all_weight += weight_cp;
	part_inf->ifl.all_weight += weight_ifl;

	/* The next block starts right after this block's cpu entries. */
	return (struct diag204_x_part_block *)&block->cpus[i];
}
/*
 * Gather all diag204/diag224 based data: machine cpu counts and this
 * LPAR's cpu counts, weights and caps. Validity bits are only set for
 * data that was actually retrieved; on any error the function returns
 * with whatever it has filled in so far.
 */
static void fill_diag(struct sthyi_sctns *sctns)
{
	int i, r, pages;
	bool this_lpar;
	void *diag204_buf;
	void *diag224_buf = NULL;
	struct diag204_x_info_blk_hdr *ti_hdr;
	struct diag204_x_part_block *part_block;
	struct diag204_x_phys_block *phys_block;
	struct lpar_cpu_inf lpar_inf = {};

	/* Errors are handled through the validity bits in the response. */
	pages = diag204((unsigned long)DIAG204_SUBC_RSI |
			(unsigned long)DIAG204_INFO_EXT, 0, NULL);
	if (pages <= 0)
		return;

	diag204_buf = vmalloc(array_size(pages, PAGE_SIZE));
	if (!diag204_buf)
		return;

	r = diag204((unsigned long)DIAG204_SUBC_STIB7 |
		    (unsigned long)DIAG204_INFO_EXT, pages, diag204_buf);
	if (r < 0)
		goto out;

	/* NOTE(review): GFP_DMA presumably because diag224 needs a low buffer -- confirm. */
	diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!diag224_buf || diag224(diag224_buf))
		goto out;

	ti_hdr = diag204_buf;
	part_block = diag204_buf + sizeof(*ti_hdr);

	for (i = 0; i < ti_hdr->npar; i++) {
		/*
		 * For the calling lpar we also need to get the cpu
		 * caps and weights. The time information block header
		 * specifies the offset to the partition block of the
		 * caller lpar, so we know when we process its data.
		 */
		this_lpar = (void *)part_block - diag204_buf == ti_hdr->this_part;
		part_block = lpar_cpu_inf(&lpar_inf, this_lpar, diag224_buf,
					  part_block);
	}

	/* The physical block follows the last partition block. */
	phys_block = (struct diag204_x_phys_block *)part_block;
	/* Point back at the calling LPAR's own partition block. */
	part_block = diag204_buf + ti_hdr->this_part;

	if (part_block->hdr.mtid)
		sctns->par.infpflg1 = PAR_MT_EN;
	sctns->par.infpval1 |= PAR_GRP_VLD;
	sctns->par.infplgcp = scale_cap(lpar_inf.cp.lpar_grp_cap);
	sctns->par.infplgif = scale_cap(lpar_inf.ifl.lpar_grp_cap);
	memcpy(sctns->par.infplgnm, part_block->hdr.hardware_group_name,
	       sizeof(sctns->par.infplgnm));

	sctns->par.infpscps = lpar_inf.cp.cpu_num_shd;
	sctns->par.infpdcps = lpar_inf.cp.cpu_num_ded;
	sctns->par.infpsifl = lpar_inf.ifl.cpu_num_shd;
	sctns->par.infpdifl = lpar_inf.ifl.cpu_num_ded;
	sctns->par.infpval1 |= PAR_PCNT_VLD;

	sctns->par.infpabcp = scale_cap(lpar_inf.cp.lpar_cap);
	sctns->par.infpabif = scale_cap(lpar_inf.ifl.lpar_cap);
	sctns->par.infpval1 |= PAR_ABS_VLD;

	/*
	 * Everything below needs global performance data to be
	 * meaningful.
	 */
	if (!(ti_hdr->flags & DIAG204_LPAR_PHYS_FLG)) {
		sctns->hdr.infhflg1 |= HDR_PERF_UNAV;
		goto out;
	}

	fill_diag_mac(sctns, phys_block, diag224_buf);

	/* Weight-based capacity: this LPAR's share of the shared cpus. */
	if (lpar_inf.cp.lpar_weight) {
		sctns->par.infpwbcp = sctns->mac.infmscps * 0x10000 *
			lpar_inf.cp.lpar_weight / lpar_inf.cp.all_weight;
	}

	if (lpar_inf.ifl.lpar_weight) {
		sctns->par.infpwbif = sctns->mac.infmsifl * 0x10000 *
			lpar_inf.ifl.lpar_weight / lpar_inf.ifl.all_weight;
	}
	sctns->par.infpval1 |= PAR_WGHT_VLD;

out:
	free_page((unsigned long)diag224_buf);
	vfree(diag204_buf);
}
/*
 * Execute the real STHYI instruction (opcode 0xB256) on the buffer at
 * @vaddr with subcode 0 in the r1 register pair.
 *
 * Returns the condition code of the instruction and stores the return
 * code, which the instruction leaves in the odd register of the r2
 * pair, in *rc.
 */
static int sthyi(u64 vaddr, u64 *rc)
{
	union register_pair r1 = { .even = 0, }; /* subcode */
	union register_pair r2 = { .even = vaddr, };
	int cc;

	asm volatile(
		".insn rre,0xB2560000,%[r1],%[r2]\n"
		"ipm %[cc]\n"		/* insert program mask: cc into the high bits */
		"srl %[cc],28\n"	/* shift cc down to 0..3 */
		: [cc] "=&d" (cc), [r2] "+&d" (r2.pair)
		: [r1] "d" (r1.pair)
		: "memory", "cc");
	*rc = r2.odd;
	return cc;
}
/*
 * Fill the given page with STHYI data, either by executing the real
 * instruction (when facility 74 is installed) or by emulating it from
 * STSI and diag204/diag224 data.
 *
 * Returns the instruction's condition code (0 when emulating) and
 * stores its return code in *rc (0 when emulating).
 */
static int fill_dst(void *dst, u64 *rc)
{
	struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;

	/*
	 * If the facility is on, we don't want to emulate the instruction.
	 * We ask the hypervisor to provide the data.
	 */
	if (test_facility(74))
		return sthyi((u64)dst, rc);

	fill_hdr(sctns);
	fill_stsi(sctns);
	fill_diag(sctns);
	*rc = 0;
	return 0;
}
  372. static int sthyi_init_cache(void)
  373. {
  374. if (sthyi_cache.info)
  375. return 0;
  376. sthyi_cache.info = (void *)get_zeroed_page(GFP_KERNEL);
  377. if (!sthyi_cache.info)
  378. return -ENOMEM;
  379. sthyi_cache.end = jiffies - 1; /* expired */
  380. return 0;
  381. }
  382. static int sthyi_update_cache(u64 *rc)
  383. {
  384. int r;
  385. memset(sthyi_cache.info, 0, PAGE_SIZE);
  386. r = fill_dst(sthyi_cache.info, rc);
  387. if (r)
  388. return r;
  389. sthyi_cache.end = jiffies + CACHE_VALID_JIFFIES;
  390. return r;
  391. }
/*
 * sthyi_fill - Fill page with data returned by the STHYI instruction
 *
 * @dst: Pointer to zeroed page
 * @rc: Pointer for storing the return code of the instruction
 *
 * Fills the destination with system information returned by the STHYI
 * instruction. The data is generated by emulation or execution of STHYI,
 * if available. The return value is either a negative error value or
 * the condition code that would be returned, the rc parameter is the
 * return code which is passed in register R2 + 1.
 */
int sthyi_fill(void *dst, u64 *rc)
{
	int r;

	mutex_lock(&sthyi_mutex);
	r = sthyi_init_cache();
	if (r)
		goto out;

	if (time_is_before_jiffies(sthyi_cache.end)) {
		/* cache expired */
		r = sthyi_update_cache(rc);
		if (r)
			goto out;
	}
	*rc = 0;	/* serving from the cache always reports rc 0 */
	memcpy(dst, sthyi_cache.info, PAGE_SIZE);
out:
	mutex_unlock(&sthyi_mutex);
	return r;
}
EXPORT_SYMBOL_GPL(sthyi_fill);
/*
 * s390_sthyi system call: copy STHYI data for @function_code into the
 * user buffer.
 *
 * Only STHYI_FC_CP_IFL_CAP is supported; no flags are defined, so any
 * nonzero @flags is rejected with -EINVAL. The instruction return code
 * is stored through @return_code when that pointer is non-NULL, and a
 * full PAGE_SIZE of data is copied to @buffer.
 */
SYSCALL_DEFINE4(s390_sthyi, unsigned long, function_code, void __user *, buffer,
		u64 __user *, return_code, unsigned long, flags)
{
	u64 sthyi_rc;
	void *info;
	int r;

	if (flags)
		return -EINVAL;
	if (function_code != STHYI_FC_CP_IFL_CAP)
		return -EOPNOTSUPP;
	/* Kernel scratch page; copied out to user space at the end. */
	info = (void *)get_zeroed_page(GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	r = sthyi_fill(info, &sthyi_rc);
	if (r < 0)
		goto out;
	if (return_code && put_user(sthyi_rc, return_code)) {
		r = -EFAULT;
		goto out;
	}
	if (copy_to_user(buffer, info, PAGE_SIZE))
		r = -EFAULT;
out:
	free_page((unsigned long)info);
	return r;
}