// SPDX-License-Identifier: GPL-2.0+
/*
 * Secure VM platform
 *
 * Copyright 2018 IBM Corporation
 * Author: Anshuman Khandual <[email protected]>
 */
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>

#include <asm/machdep.h>
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
  16. static int __init init_svm(void)
  17. {
  18. if (!is_secure_guest())
  19. return 0;
  20. /* Don't release the SWIOTLB buffer. */
  21. ppc_swiotlb_enable = 1;
  22. /*
  23. * Since the guest memory is inaccessible to the host, devices always
  24. * need to use the SWIOTLB buffer for DMA even if dma_capable() says
  25. * otherwise.
  26. */
  27. ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
  28. /* Share the SWIOTLB buffer with the host. */
  29. swiotlb_update_mem_attributes();
  30. return 0;
  31. }
  32. machine_early_initcall(pseries, init_svm);
  33. int set_memory_encrypted(unsigned long addr, int numpages)
  34. {
  35. if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
  36. return 0;
  37. if (!PAGE_ALIGNED(addr))
  38. return -EINVAL;
  39. uv_unshare_page(PHYS_PFN(__pa(addr)), numpages);
  40. return 0;
  41. }
  42. int set_memory_decrypted(unsigned long addr, int numpages)
  43. {
  44. if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
  45. return 0;
  46. if (!PAGE_ALIGNED(addr))
  47. return -EINVAL;
  48. uv_share_page(PHYS_PFN(__pa(addr)), numpages);
  49. return 0;
  50. }
/* There's one dispatch log per CPU. */
#define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)

/* Pages that have already been shared with the Ultravisor for dispatch logs. */
static struct page *dtl_page_store[NR_DTL_PAGE];
/* Number of valid entries in dtl_page_store. */
static long dtl_nr_pages;
  55. static bool is_dtl_page_shared(struct page *page)
  56. {
  57. long i;
  58. for (i = 0; i < dtl_nr_pages; i++)
  59. if (dtl_page_store[i] == page)
  60. return true;
  61. return false;
  62. }
  63. void dtl_cache_ctor(void *addr)
  64. {
  65. unsigned long pfn = PHYS_PFN(__pa(addr));
  66. struct page *page = pfn_to_page(pfn);
  67. if (!is_dtl_page_shared(page)) {
  68. dtl_page_store[dtl_nr_pages] = page;
  69. dtl_nr_pages++;
  70. WARN_ON(dtl_nr_pages >= NR_DTL_PAGE);
  71. uv_share_page(pfn, 1);
  72. }
  73. }