uv.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);
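
/*
 * Hand the donated base storage over to the Ultravisor and let it
 * initialize itself (UVC_CMD_INIT_UV).
 */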
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
        struct uv_cb_init uvcb = {
                .header.cmd = UVC_CMD_INIT_UV,
                .header.len = sizeof(uvcb),
                .stor_origin = stor_base,
                .stor_len = stor_len,
        };

        if (uv_call(0, (uint64_t)&uvcb)) {
                pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
                       uvcb.header.rc, uvcb.header.rrc);
                return -1;
        }
        return 0;
}
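
/*
 * Reserve the base storage the Ultravisor needs and donate it via uv_init().
 * On any failure, protected virtualization support is disabled on the host.
 */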
void __init setup_uv(void)
{
        void *uv_stor_base;

        if (!is_prot_virt_host())
                return;

        uv_stor_base = memblock_alloc_try_nid(
                uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
                MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
        if (!uv_stor_base) {
                pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
                        uv_info.uv_base_stor_len);
                goto fail;
        }

        if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
                memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
                goto fail;
        }

        pr_info("Reserving %luMB as ultravisor base storage\n",
                uv_info.uv_base_stor_len >> 20);
        return;
fail:
        pr_info("Disabling support for protected virtualization\n");
        prot_virt_host = 0;
}
/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_PIN_PAGE_SHARED,
                .header.len = sizeof(uvcb),
                .paddr = paddr,
        };

        if (uv_call(0, (u64)&uvcb))
                return -EINVAL;
        return 0;
}

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_DESTR_SEC_STOR,
                .header.len = sizeof(uvcb),
                .paddr = paddr
        };

        if (uv_call(0, (u64)&uvcb)) {
                /*
                 * Older firmware uses 107/d as an indication of a non secure
                 * page. Let us emulate the newer variant (no-op).
                 */
                if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
                        return 0;
                return -EINVAL;
        }
        return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
        struct page *page = phys_to_page(paddr);
        int rc;

        get_page(page);
        rc = uv_destroy_page(paddr);
        if (!rc)
                clear_bit(PG_arch_1, &page->flags);
        put_page(page);
        return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
                .header.len = sizeof(uvcb),
                .paddr = paddr
        };

        if (uv_call(0, (u64)&uvcb))
                return -EINVAL;
        return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
        struct page *page = phys_to_page(paddr);
        int rc;

        get_page(page);
        rc = uv_convert_from_secure(paddr);
        if (!rc)
                clear_bit(PG_arch_1, &page->flags);
        put_page(page);
        return rc;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page can not be a huge page for example.
 */
static int expected_page_refs(struct page *page)
{
        int res;

        res = page_mapcount(page);
        if (PageSwapCache(page)) {
                res++;
        } else if (page_mapping(page)) {
                res++;
                if (page_has_private(page))
                        res++;
        }
        return res;
}
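
/*
 * Issue the given UVC for the page exactly once, with the page reference
 * count frozen at its expected value; callers must retry on -EAGAIN.
 */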
static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
{
        int expected, cc = 0;

        if (PageWriteback(page))
                return -EAGAIN;
        expected = expected_page_refs(page);
        if (!page_ref_freeze(page, expected))
                return -EBUSY;
        set_bit(PG_arch_1, &page->flags);
        /*
         * If the UVC does not succeed or fail immediately, we don't want to
         * loop for long, or we might get stall notifications.
         * On the other hand, this is a complex scenario and we are holding
         * a lot of locks, so we can't easily sleep and reschedule. We try
         * only once, and if the UVC returned busy or partial completion,
         * we return -EAGAIN and we let the callers deal with it.
         */
        cc = __uv_call(0, (u64)uvcb);
        page_ref_unfreeze(page, expected);
        /*
         * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
         * If busy or partially completed, return -EAGAIN.
         */
        if (cc == UVC_CC_OK)
                return 0;
        else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
                return -EAGAIN;
        return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * No export is needed also when there is only one protected VM, because the
 * page cannot belong to the wrong VM in that case (there is no "other VM"
 * it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
        if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
                return false;
        return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
        struct vm_area_struct *vma;
        bool local_drain = false;
        spinlock_t *ptelock;
        unsigned long uaddr;
        struct page *page;
        pte_t *ptep;
        int rc;

again:
        rc = -EFAULT;
        mmap_read_lock(gmap->mm);

        uaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(uaddr))
                goto out;
        vma = vma_lookup(gmap->mm, uaddr);
        if (!vma)
                goto out;
        /*
         * Secure pages cannot be huge and userspace should not combine both.
         * In case userspace does it anyway this will result in an -EFAULT for
         * the unpack. The guest is thus never reaching secure mode. If
         * userspace plays dirty tricks with mapping huge pages later on,
         * this will result in a segmentation fault.
         */
        if (is_vm_hugetlb_page(vma))
                goto out;

        rc = -ENXIO;
        ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
        if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
                page = pte_page(*ptep);
                rc = -EAGAIN;
                if (trylock_page(page)) {
                        if (should_export_before_import(uvcb, gmap->mm))
                                uv_convert_from_secure(page_to_phys(page));
                        rc = make_page_secure(page, uvcb);
                        unlock_page(page);
                }
        }
        pte_unmap_unlock(ptep, ptelock);
out:
        mmap_read_unlock(gmap->mm);

        if (rc == -EAGAIN) {
                /*
                 * If we are here because the UVC returned busy or partial
                 * completion, this is just a useless check, but it is safe.
                 */
                wait_on_page_writeback(page);
        } else if (rc == -EBUSY) {
                /*
                 * If we have tried a local drain and the page refcount
                 * still does not match our expected safe value, try with a
                 * system wide drain. This is needed if the pagevecs holding
                 * the page are on a different CPU.
                 */
                if (local_drain) {
                        lru_add_drain_all();
                        /* We give up here, and let the caller try again */
                        return -EAGAIN;
                }
                /*
                 * We are here if the page refcount does not match the
                 * expected safe value. The main culprits are usually
                 * pagevecs. With lru_add_drain() we drain the pagevecs
                 * on the local CPU so that hopefully the refcount will
                 * reach the expected safe value.
                 */
                lru_add_drain();
                local_drain = true;
                /* And now we try again immediately after draining */
                goto again;
        } else if (rc == -ENXIO) {
                if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
                        return -EFAULT;
                return -EAGAIN;
        }
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
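
/*
 * Import the page at the given guest address into the guest's secure
 * configuration (Convert to Secure Storage UVC) via gmap_make_secure().
 */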
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
        struct uv_cb_cts uvcb = {
                .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
                .header.len = sizeof(uvcb),
                .guest_handle = gmap->guest_handle,
                .gaddr = gaddr,
        };

        return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
        struct vm_area_struct *vma;
        unsigned long uaddr;
        struct page *page;
        int rc;

        rc = -EFAULT;
        mmap_read_lock(gmap->mm);

        uaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(uaddr))
                goto out;
        vma = vma_lookup(gmap->mm, uaddr);
        if (!vma)
                goto out;
        /*
         * Huge pages should not be able to become secure
         */
        if (is_vm_hugetlb_page(vma))
                goto out;

        rc = 0;
        /* we take an extra reference here */
        page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
        if (IS_ERR_OR_NULL(page))
                goto out;
        rc = uv_destroy_owned_page(page_to_phys(page));
        /*
         * Fault handlers can race; it is possible that two CPUs will fault
         * on the same secure page. One CPU can destroy the page, reboot,
         * re-enter secure mode and import it, while the second CPU was
         * stuck at the beginning of the handler. At some point the second
         * CPU will be able to progress, and it will not be able to destroy
         * the page. In that case we do not want to terminate the process,
         * we instead try to export the page.
         */
        if (rc)
                rc = uv_convert_owned_from_secure(page_to_phys(page));
        put_page(page);
out:
        mmap_read_unlock(gmap->mm);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel calls to make_page_accessible is fine, as the UV calls will
 * become a no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
        int rc = 0;

        /* Hugepage cannot be protected, so nothing to do */
        if (PageHuge(page))
                return 0;

        /*
         * PG_arch_1 is used in 3 places:
         * 1. for kernel page tables during early boot
         * 2. for storage keys of huge pages and KVM
         * 3. As an indication that this page might be secure. This can
         *    overindicate, e.g. we set the bit before calling
         *    convert_to_secure.
         * As secure pages are never huge, all 3 variants can co-exist.
         */
        if (!test_bit(PG_arch_1, &page->flags))
                return 0;

        rc = uv_pin_shared(page_to_phys(page));
        if (!rc) {
                clear_bit(PG_arch_1, &page->flags);
                return 0;
        }

        rc = uv_convert_from_secure(page_to_phys(page));
        if (!rc) {
                clear_bit(PG_arch_1, &page->flags);
                return 0;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
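/*
 * Read-only sysfs attributes exposing the Ultravisor query information;
 * see uv_info_init() for where they are registered.
 */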
static ssize_t uv_query_facilities(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
                         uv_info.inst_calls_list[0],
                         uv_info.inst_calls_list[1],
                         uv_info.inst_calls_list[2],
                         uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
        __ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
        __ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
        __ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%lx\n",
                         uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
        __ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
                                               struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%lx\n",
                         uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
        __ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
                                          struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%lx\n",
                         uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
        __ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
                                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
        __ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%d\n",
                         uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
        __ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%d\n",
                         uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
        __ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%lx\n",
                         uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
        __ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
                                             struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
        __ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *page)
{
        return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
        __ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static struct attribute *uv_query_attrs[] = {
        &uv_query_facilities_attr.attr,
        &uv_query_feature_indications_attr.attr,
        &uv_query_max_guest_cpus_attr.attr,
        &uv_query_max_guest_vms_attr.attr,
        &uv_query_max_guest_addr_attr.attr,
        &uv_query_supp_se_hdr_ver_attr.attr,
        &uv_query_supp_se_hdr_pcf_attr.attr,
        &uv_query_dump_storage_state_len_attr.attr,
        &uv_query_dump_finalize_len_attr.attr,
        &uv_query_dump_cpu_len_attr.attr,
        &uv_query_supp_att_req_hdr_ver_attr.attr,
        &uv_query_supp_att_pflags_attr.attr,
        NULL,
};
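
/* Registered on the "query" kset (/sys/firmware/uv/query) in uv_info_init(). */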
static struct attribute_group uv_query_attr_group = {
        .attrs = uv_query_attrs,
};
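
/*
 * prot_virt_guest/prot_virt_host report whether this system runs as a
 * protected virtualization guest, respectively whether it can host
 * protected virtualization guests.
 */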
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *page)
{
        int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
        val = prot_virt_guest;
#endif
        return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *page)
{
        int val = 0;

#if IS_ENABLED(CONFIG_KVM)
        val = prot_virt_host;
#endif
        return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
        __ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
        __ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
        &uv_prot_virt_guest.attr,
        &uv_prot_virt_host.attr,
        NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;
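
/*
 * Create the /sys/firmware/uv hierarchy if the Ultravisor call facility
 * (facility bit 158) is available, tearing everything down again on failure.
 */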
static int __init uv_info_init(void)
{
        int rc = -ENOMEM;

        if (!test_facility(158))
                return 0;

        uv_kobj = kobject_create_and_add("uv", firmware_kobj);
        if (!uv_kobj)
                return -ENOMEM;

        rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
        if (rc)
                goto out_kobj;

        uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
        if (!uv_query_kset) {
                rc = -ENOMEM;
                goto out_ind_files;
        }

        rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
        if (!rc)
                return 0;

        kset_unregister(uv_query_kset);
out_ind_files:
        sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
        kobject_del(uv_kobj);
        kobject_put(uv_kobj);
        return rc;
}
device_initcall(uv_info_init);
#endif