unpopulated-alloc.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/balloon.h>
#include <xen/page.h>
#include <xen/xen.h>

static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;

static struct resource *target_resource;
/*
 * If the arch is not happy with the system "iomem_resource" being used for
 * the region allocation, it can provide its own view by creating a specific
 * Xen resource with unused regions of guest physical address space provided
 * by the hypervisor.
 */
int __weak __init arch_xen_unpopulated_init(struct resource **res)
{
	*res = &iomem_resource;

	return 0;
}
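
/*
 * For illustration only (a hypothetical sketch, not code from this file):
 * an arch-specific override could publish a dedicated resource built from
 * hypervisor-reported unused ranges instead of the global iomem_resource.
 * The "xen_scratch_res" name and the range values below are made up.
 *
 *	static struct resource xen_scratch_res = {
 *		.name  = "Xen unused space",
 *		.start = 0,	// would be filled from hypervisor-provided data
 *		.end   = -1,
 *		.flags = IORESOURCE_MEM,
 *	};
 *
 *	int __init arch_xen_unpopulated_init(struct resource **res)
 *	{
 *		*res = &xen_scratch_res;
 *		return 0;
 *	}
 */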
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res, *tmp_res = NULL;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	struct range mhp_range;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	mhp_range = mhp_get_pluggable_range(true);

	ret = allocate_resource(target_resource, res,
				alloc_pages * PAGE_SIZE, mhp_range.start,
				mhp_range.end, PAGES_PER_SECTION * PAGE_SIZE,
				NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}
	/*
	 * Reserve the region previously allocated from the Xen resource so
	 * that it cannot be reused by someone else.
	 */
	if (target_resource != &iomem_resource) {
		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
		if (!tmp_res) {
			ret = -ENOMEM;
			goto err_insert;
		}

		tmp_res->name = res->name;
		tmp_res->start = res->start;
		tmp_res->end = res->end;
		tmp_res->flags = res->flags;

		ret = request_resource(&iomem_resource, tmp_res);
		if (ret < 0) {
			pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
			kfree(tmp_res);
			goto err_insert;
		}
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;
#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory, so the p2m
	 * must contain invalid entries so that the correct non-present PTEs
	 * will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries are not
	 * restored since this region is now known not to conflict with any
	 * devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif
	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	if (tmp_res) {
		release_resource(tmp_res);
		kfree(tmp_res);
	}
err_insert:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}
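
/*
 * Free pages are kept on a singly linked list threaded through each page's
 * zone_device_data field, with list_count tracking its length; both are
 * protected by list_lock. fill_list() pushes freshly remapped pages onto
 * the list and xen_alloc_unpopulated_pages() below pops them off.
 */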
/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	/*
	 * Fall back to the default behavior if we do not have any suitable
	 * resource to allocate the required region from and, as a result,
	 * won't be able to construct pages.
	 */
	if (!target_resource)
		return xen_alloc_ballooned_pages(nr_pages, pages);

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
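
/*
 * Usage sketch for xen_alloc_unpopulated_pages() and its counterpart
 * xen_free_unpopulated_pages() below (illustrative only; the caller and
 * its error handling are hypothetical, not taken from this file): a
 * driver grabs a batch of unpopulated pages, uses them as space for
 * foreign mappings, and hands them back when done.
 *
 *	struct page *pages[16];
 *	int ret;
 *
 *	ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *	if (ret)
 *		return ret;
 *	// ... map foreign grant frames into pages[] ...
 *	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 */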
/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	if (!target_resource) {
		xen_free_ballooned_pages(nr_pages, pages);
		return;
	}

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
static int __init unpopulated_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = arch_xen_unpopulated_init(&target_resource);
	if (ret) {
		pr_err("xen:unpopulated: Cannot initialize target resource\n");
		target_resource = NULL;
	}

	return ret;
}
early_initcall(unpopulated_init);