vm_mgr_mm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "gh_vm_mgr: " fmt

#include <linux/gunyah_rsc_mgr.h>
#include <linux/mm.h>

#include <uapi/linux/gunyah.h>

#include "vm_mgr.h"

static bool pages_are_mergeable(struct page *a, struct page *b)
{
	return page_to_pfn(a) + 1 == page_to_pfn(b);
}

static bool gh_vm_mem_overlap(struct gh_vm_mem *a, u64 addr, u64 size)
{
	u64 a_end = a->guest_phys_addr + (a->npages << PAGE_SHIFT);
	u64 end = addr + size;

	return a->guest_phys_addr < end && addr < a_end;
}

static struct gh_vm_mem *__gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label)
	__must_hold(&ghvm->mm_lock)
{
	struct gh_vm_mem *mapping;

	list_for_each_entry(mapping, &ghvm->memory_mappings, list)
		if (mapping->parcel.label == label)
			return mapping;

	return NULL;
}

static void gh_vm_mem_reclaim_mapping(struct gh_vm *ghvm, struct gh_vm_mem *mapping)
	__must_hold(&ghvm->mm_lock)
{
	int ret = 0;

	if (mapping->parcel.mem_handle != GH_MEM_HANDLE_INVAL) {
		ret = gh_rm_mem_reclaim(ghvm->rm, &mapping->parcel);
		if (ret)
			pr_warn("Failed to reclaim memory parcel for label %d: %d\n",
				mapping->parcel.label, ret);
	}

	if (!ret) {
		unpin_user_pages(mapping->pages, mapping->npages);
		account_locked_vm(ghvm->mm, mapping->npages, false);
	}

	kfree(mapping->pages);
	kfree(mapping->parcel.acl_entries);
	kfree(mapping->parcel.mem_entries);

	list_del(&mapping->list);
}
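
/*
 * gh_vm_mem_reclaim() - Tear down all of a VM's memory mappings
 * @ghvm: The VM whose memory mappings should be reclaimed
 *
 * Reclaims each memory parcel from the resource manager, unpins the backing
 * pages when the reclaim succeeds, and frees the mapping bookkeeping.
 */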
void gh_vm_mem_reclaim(struct gh_vm *ghvm)
{
	struct gh_vm_mem *mapping, *tmp;

	mutex_lock(&ghvm->mm_lock);
	list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
		gh_vm_mem_reclaim_mapping(ghvm, mapping);
		kfree(mapping);
	}
	mutex_unlock(&ghvm->mm_lock);
}
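
/*
 * gh_vm_mem_find_by_addr() - Find a mapping overlapping a guest-physical range
 * @ghvm: The VM to search
 * @guest_phys_addr: Start of the guest-physical range
 * @size: Length of the range in bytes
 *
 * Returns the first mapping that overlaps the range, or NULL if the range
 * overflows or no mapping overlaps it.
 */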
struct gh_vm_mem *gh_vm_mem_find_by_addr(struct gh_vm *ghvm, u64 guest_phys_addr, u32 size)
{
	struct gh_vm_mem *mapping;

	if (overflows_type(guest_phys_addr + size, u64))
		return NULL;

	mutex_lock(&ghvm->mm_lock);
	list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
		if (gh_vm_mem_overlap(mapping, guest_phys_addr, size))
			goto unlock;
	}

	mapping = NULL;
unlock:
	mutex_unlock(&ghvm->mm_lock);
	return mapping;
}
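
/*
 * gh_vm_mem_alloc() - Pin userspace memory and describe it as a memory parcel
 * @ghvm: The VM that will receive the memory
 * @region: Userspace description of the region (addresses, size, label, flags)
 * @lend: True to lend the memory exclusively to the guest, false to share it
 *
 * Pins the userspace pages, builds the parcel's ACL and coalesced memory
 * entries, and adds the mapping to ghvm->memory_mappings. The parcel itself
 * is shared or lent later, when the VM is started.
 */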
int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region, bool lend)
{
	struct gh_vm_mem *mapping, *tmp_mapping;
	struct page *curr_page, *prev_page;
	struct gh_rm_mem_parcel *parcel;
	int i, j, pinned, ret = 0;
	unsigned int gup_flags;
	size_t entry_size;
	u16 vmid;

	if (!region->memory_size || !PAGE_ALIGNED(region->memory_size) ||
	    !PAGE_ALIGNED(region->userspace_addr) ||
	    !PAGE_ALIGNED(region->guest_phys_addr))
		return -EINVAL;

	if (overflows_type(region->guest_phys_addr + region->memory_size, u64))
		return -EOVERFLOW;

	ret = mutex_lock_interruptible(&ghvm->mm_lock);
	if (ret)
		return ret;

	mapping = __gh_vm_mem_find_by_label(ghvm, region->label);
	if (mapping) {
		ret = -EEXIST;
		goto unlock;
	}

	list_for_each_entry(tmp_mapping, &ghvm->memory_mappings, list) {
		if (gh_vm_mem_overlap(tmp_mapping, region->guest_phys_addr,
				      region->memory_size)) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL_ACCOUNT);
	if (!mapping) {
		ret = -ENOMEM;
		goto unlock;
	}

	mapping->guest_phys_addr = region->guest_phys_addr;
	mapping->npages = region->memory_size >> PAGE_SHIFT;
	parcel = &mapping->parcel;
	parcel->label = region->label;
	parcel->mem_handle = GH_MEM_HANDLE_INVAL; /* to be filled later by mem_share/mem_lend */
	parcel->mem_type = GH_RM_MEM_TYPE_NORMAL;

	ret = account_locked_vm(ghvm->mm, mapping->npages, true);
	if (ret)
		goto free_mapping;

	mapping->pages = kcalloc(mapping->npages, sizeof(*mapping->pages), GFP_KERNEL_ACCOUNT);
	if (!mapping->pages) {
		ret = -ENOMEM;
		mapping->npages = 0; /* update npages for reclaim */
		goto unlock_pages;
	}

	gup_flags = FOLL_LONGTERM;
	if (region->flags & GH_MEM_ALLOW_WRITE)
		gup_flags |= FOLL_WRITE;

	pinned = pin_user_pages_fast(region->userspace_addr, mapping->npages,
				     gup_flags, mapping->pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != mapping->npages) {
		ret = -EFAULT;
		mapping->npages = pinned; /* update npages for reclaim */
		goto unpin_pages;
	}

	if (lend) {
		parcel->n_acl_entries = 1;
		mapping->share_type = VM_MEM_LEND;
	} else {
		parcel->n_acl_entries = 2;
		mapping->share_type = VM_MEM_SHARE;
	}
	parcel->acl_entries = kcalloc(parcel->n_acl_entries,
				      sizeof(*parcel->acl_entries), GFP_KERNEL);
	if (!parcel->acl_entries) {
		ret = -ENOMEM;
		goto unpin_pages;
	}

	/* acl_entries[0].vmid will be this VM's vmid. We'll fill it when the
	 * VM is starting and we know the VM's vmid.
	 */
	if (region->flags & GH_MEM_ALLOW_READ)
		parcel->acl_entries[0].perms |= GH_RM_ACL_R;
	if (region->flags & GH_MEM_ALLOW_WRITE)
		parcel->acl_entries[0].perms |= GH_RM_ACL_W;
	if (region->flags & GH_MEM_ALLOW_EXEC)
		parcel->acl_entries[0].perms |= GH_RM_ACL_X;

	if (!lend) {
		ret = gh_rm_get_vmid(ghvm->rm, &vmid);
		if (ret)
			goto free_acl;

		parcel->acl_entries[1].vmid = cpu_to_le16(vmid);
		/* Host assumed to have all these permissions. Gunyah will not
		 * grant new permissions if host actually had less than RWX
		 */
		parcel->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W | GH_RM_ACL_X;
	}
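
	/* Count the physically contiguous runs of pages to size the mem_entries array */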
	parcel->n_mem_entries = 1;
	for (i = 1; i < mapping->npages; i++) {
		if (!pages_are_mergeable(mapping->pages[i - 1], mapping->pages[i]))
			parcel->n_mem_entries++;
	}

	parcel->mem_entries = kcalloc(parcel->n_mem_entries,
				      sizeof(parcel->mem_entries[0]),
				      GFP_KERNEL_ACCOUNT);
	if (!parcel->mem_entries) {
		ret = -ENOMEM;
		goto free_acl;
	}

	/* reduce number of entries by combining contiguous pages into single memory entry */
	prev_page = mapping->pages[0];
	parcel->mem_entries[0].phys_addr = cpu_to_le64(page_to_phys(prev_page));
	entry_size = PAGE_SIZE;
	for (i = 1, j = 0; i < mapping->npages; i++) {
		curr_page = mapping->pages[i];
		if (pages_are_mergeable(prev_page, curr_page)) {
			entry_size += PAGE_SIZE;
		} else {
			parcel->mem_entries[j].size = cpu_to_le64(entry_size);
			j++;
			parcel->mem_entries[j].phys_addr =
				cpu_to_le64(page_to_phys(curr_page));
			entry_size = PAGE_SIZE;
		}

		prev_page = curr_page;
	}
	parcel->mem_entries[j].size = cpu_to_le64(entry_size);

	list_add(&mapping->list, &ghvm->memory_mappings);
	mutex_unlock(&ghvm->mm_lock);
	return 0;
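/* Error unwind: release resources in the reverse order they were acquired */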
free_acl:
	kfree(parcel->acl_entries);
unpin_pages:
	unpin_user_pages(mapping->pages, pinned);
free_pages:
	kfree(mapping->pages);
unlock_pages:
	account_locked_vm(ghvm->mm, mapping->npages, false);
free_mapping:
	kfree(mapping);
unlock:
	mutex_unlock(&ghvm->mm_lock);
	return ret;
}