mem-reservation.c

// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
#include <linux/moduleparam.h>

bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
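/*
 * Illustrative note (not part of the original file): xen_scrub_pages
 * controls whether a page is zeroed before being handed back to the
 * hypervisor, so its previous contents cannot leak to another domain.
 * The companion header (xen/mem-reservation.h) gates scrubbing on this
 * flag along roughly these lines:
 *
 *	static inline void xenmem_reservation_scrub_page(struct page *page)
 *	{
 *		if (xen_scrub_pages)
 *			clear_highpage(page);
 *	}
 */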
/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
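/*
 * Worked example (illustrative): XEN_PFN_PER_PAGE is PAGE_SIZE divided
 * by the fixed 4 KiB Xen page size. With 4 KiB kernel pages it is 1,
 * so EXTENT_ORDER = fls(1) - 1 = 0, i.e. one 4 KiB frame per extent.
 * With 64 KiB kernel pages (e.g. some arm64 configs) it is 16, so
 * EXTENT_ORDER = fls(16) - 1 = 4 and each extent covers a whole
 * 64 KiB kernel page.
 */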
#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn = page_to_pfn(page);
		int ret;

		BUG_ON(!page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		set_phys_to_machine(pfn, frames[i]);

		ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frames[i], PAGE_KERNEL), 0);
		BUG_ON(ret);
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn = page_to_pfn(page);
		int ret;

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
		BUG_ON(ret);

		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);

#endif /* CONFIG_XEN_HAVE_PVMMU */
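/*
 * Illustrative note (not part of the original file): callers are not
 * expected to invoke the __-prefixed helpers above directly. The
 * companion header (xen/mem-reservation.h) wraps them so they only run
 * for PV guests, roughly like this sketch:
 *
 *	static inline void
 *	xenmem_reservation_va_mapping_update(unsigned long count,
 *					     struct page **pages,
 *					     xen_pfn_t *frames)
 *	{
 *	#ifdef CONFIG_XEN_HAVE_PVMMU
 *		if (!xen_feature(XENFEAT_auto_translated_physmap))
 *			__xenmem_reservation_va_mapping_update(count, pages,
 *							       frames);
 *	#endif
 *	}
 *
 * On auto-translated (HVM/PVH) guests the wrapper is a no-op, since the
 * hypervisor maintains the physical-to-machine mapping itself.
 */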
/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
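/*
 * Usage sketch (illustrative, not part of the original file): how a
 * balloon-style caller might reclaim @nr pages from the hypervisor and,
 * on PV guests, rewire the kernel mappings afterwards.
 * example_reclaim_pages() and its arguments are hypothetical; error
 * handling is elided.
 */
static int example_reclaim_pages(unsigned long nr, struct page **pages,
				 xen_pfn_t *frames)
{
	unsigned long i;
	int rc;

	/* Ask Xen to populate the physmap at the PFNs of our pages. */
	for (i = 0; i < nr; i++)
		frames[i] = page_to_xen_pfn(pages[i]);

	rc = xenmem_reservation_increase(nr, frames);
	if (rc <= 0)
		return rc ? rc : -ENOMEM;	/* nothing was populated */

	/* PV only: point the kernel's virtual mappings at the new frames. */
	xenmem_reservation_va_mapping_update(rc, pages, frames);
	return rc;
}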
/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	/* XENMEM_decrease_reservation requires a GFN */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
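/*
 * Usage sketch (illustrative, not part of the original file): the
 * mirror-image release path. Pages are scrubbed first (when enabled),
 * their kernel mappings are torn down on PV, and only then is the
 * reservation handed back to Xen. example_release_pages() and its
 * return convention are hypothetical; error handling is elided.
 */
static int example_release_pages(unsigned long nr, struct page **pages,
				 xen_pfn_t *frames)
{
	unsigned long i;
	int rc;

	for (i = 0; i < nr; i++) {
		/* Zero the page contents if xen_scrub_pages is set. */
		xenmem_reservation_scrub_page(pages[i]);
		frames[i] = xen_page_to_gfn(pages[i]);
	}

	/* PV only: unmap before the frames disappear out from under us. */
	xenmem_reservation_va_mapping_reset(nr, pages);

	rc = xenmem_reservation_decrease(nr, frames);
	return rc == nr ? 0 : -EBUSY;
}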