gzvm_mmu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/gzvm_drv.h>

/**
 * hva_to_pa_fast() - converts hva to pa in generic fast way
 * @hva: Host virtual address.
 *
 * Return: PA or GZVM_PA_ERR_BAD for translation error
 */
u64 hva_to_pa_fast(u64 hva)
{
	struct page *page[1];
	u64 pfn;

	if (get_user_page_fast_only(hva, 0, page)) {
		pfn = page_to_phys(page[0]);
		put_page(page[0]);
		return pfn;
	}

	return GZVM_PA_ERR_BAD;
}

/**
 * hva_to_pa_slow() - converts hva to pa in a slow way
 * @hva: Host virtual address
 *
 * This function converts HVA to PA in a slow way because the target hva is
 * not yet allocated and mapped in the host stage-1 page table, so it cannot
 * be found via the current page table directly. Instead, the page has to be
 * faulted in first, which is much slower than a direct page table lookup.
 *
 * Context: This function may sleep
 * Return: PA or GZVM_PA_ERR_BAD for translation error
 */
u64 hva_to_pa_slow(u64 hva)
{
	struct page *page = NULL;
	u64 pfn = 0;
	int npages;

	npages = get_user_pages_unlocked(hva, 1, &page, 0);
	if (npages != 1)
		return GZVM_PA_ERR_BAD;

	if (page) {
		pfn = page_to_phys(page);
		put_page(page);
		return pfn;
	}

	return GZVM_PA_ERR_BAD;
}

static u64 __gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn)
{
	u64 hva, pa;

	hva = gzvm_gfn_to_hva_memslot(memslot, gfn);

	/*
	 * Try the arch-specific translation first, then the generic fast
	 * path, and finally fall back to faulting the page in.
	 */
	pa = gzvm_hva_to_pa_arch(hva);
	if (pa != GZVM_PA_ERR_BAD)
		return PHYS_PFN(pa);

	pa = hva_to_pa_fast(hva);
	if (pa != GZVM_PA_ERR_BAD)
		return PHYS_PFN(pa);

	pa = hva_to_pa_slow(hva);
	if (pa != GZVM_PA_ERR_BAD)
		return PHYS_PFN(pa);

	return GZVM_PA_ERR_BAD;
}

/**
 * gzvm_gfn_to_pfn_memslot() - Translate gfn (guest ipa) to pfn (host pa),
 *			       result is in @pfn
 * @memslot: Pointer to struct gzvm_memslot.
 * @gfn: Guest frame number.
 * @pfn: Host page frame number.
 *
 * Return:
 * * 0 - Success
 * * -EFAULT - Failed to convert
 */
int gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn, u64 *pfn)
{
	u64 __pfn;

	if (!memslot)
		return -EFAULT;

	__pfn = __gzvm_gfn_to_pfn_memslot(memslot, gfn);
	if (__pfn == GZVM_PA_ERR_BAD) {
		*pfn = 0;
		return -EFAULT;
	}
	*pfn = __pfn;

	return 0;
}

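/*
 * A minimal usage sketch (not part of the build): a hypothetical caller that
 * resolves a guest frame before mapping it. The function name and the direct
 * memslot indexing are illustrative assumptions; real callers obtain the slot
 * id via gzvm_find_memslot() as gzvm_handle_page_fault() does below.
 */
#if 0	/* example only */
static int example_resolve_gfn(struct gzvm *vm, int memslot_id, u64 gfn)
{
	u64 pfn;
	int ret;

	ret = gzvm_gfn_to_pfn_memslot(&vm->memslot[memslot_id], gfn, &pfn);
	if (ret)
		return ret;	/* -EFAULT: the hva could not be translated */

	/* pfn now holds the host page frame number backing @gfn */
	return 0;
}
#endif
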
/* rb-tree comparator for insertion: orders pinned pages by guest ipa */
static int cmp_ppages(struct rb_node *node, const struct rb_node *parent)
{
	struct gzvm_pinned_page *a = container_of(node,
						  struct gzvm_pinned_page,
						  node);
	struct gzvm_pinned_page *b = container_of(parent,
						  struct gzvm_pinned_page,
						  node);

	if (a->ipa < b->ipa)
		return -1;
	if (a->ipa > b->ipa)
		return 1;
	return 0;
}

/* rb-tree comparator for lookup: @key is the guest ipa cast to a pointer */
static int rb_ppage_cmp(const void *key, const struct rb_node *node)
{
	struct gzvm_pinned_page *p = container_of(node,
						  struct gzvm_pinned_page,
						  node);
	phys_addr_t ipa = (phys_addr_t)key;

	return (ipa < p->ipa) ? -1 : (ipa > p->ipa);
}

static int gzvm_insert_ppage(struct gzvm *vm, struct gzvm_pinned_page *ppage)
{
	if (rb_find_add(&ppage->node, &vm->pinned_pages, cmp_ppages))
		return -EEXIST;
	return 0;
}

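/*
 * A minimal sketch of how the lookup comparator pairs with rb_find(); it
 * mirrors what gzvm_handle_relinquish() does below. The helper name is
 * hypothetical and not part of the driver.
 */
#if 0	/* example only */
static struct gzvm_pinned_page *example_find_ppage(struct gzvm *vm,
						   phys_addr_t ipa)
{
	struct rb_node *node;

	node = rb_find((void *)ipa, &vm->pinned_pages, rb_ppage_cmp);
	if (!node)
		return NULL;

	return container_of(node, struct gzvm_pinned_page, node);
}
#endif
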
static int pin_one_page(struct gzvm *vm, unsigned long hva, u64 gpa)
{
	unsigned int flags = FOLL_HWPOISON | FOLL_LONGTERM | FOLL_WRITE;
	struct gzvm_pinned_page *ppage = NULL;
	struct mm_struct *mm = current->mm;
	struct page *page = NULL;
	int ret;

	ppage = kmalloc(sizeof(*ppage), GFP_KERNEL_ACCOUNT);
	if (!ppage)
		return -ENOMEM;

	mmap_read_lock(mm);
	pin_user_pages(hva, 1, flags, &page, NULL);
	mmap_read_unlock(mm);

	if (!page) {
		kfree(ppage);
		return -EFAULT;
	}

	ppage->page = page;
	ppage->ipa = gpa;

	ret = gzvm_insert_ppage(vm, ppage);
	if (ret == -EEXIST) {
		/* The gpa is already pinned; drop the extra pin and node. */
		unpin_user_pages(&page, 1);
		kfree(ppage);
		ret = 0;
	}

	return ret;
}

/**
 * gzvm_handle_relinquish() - Handle memory relinquish request from hypervisor
 *
 * @vcpu: Pointer to the struct gzvm_vcpu that received the request
 * @ipa: Start address (gpa) of a reclaimed page
 *
 * Return: Always return 0 because there are no cases of failure
 */
int gzvm_handle_relinquish(struct gzvm_vcpu *vcpu, phys_addr_t ipa)
{
	struct gzvm_pinned_page *ppage;
	struct rb_node *node;
	struct gzvm *vm = vcpu->gzvm;

	node = rb_find((void *)ipa, &vm->pinned_pages, rb_ppage_cmp);
	if (!node)
		return 0;

	rb_erase(node, &vm->pinned_pages);

	ppage = container_of(node, struct gzvm_pinned_page, node);
	unpin_user_pages_dirty_lock(&ppage->page, 1, true);
	kfree(ppage);

	return 0;
}

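/*
 * A minimal teardown sketch, assuming any pages still pinned when the VM is
 * destroyed must be unpinned the same way as above. The helper name is
 * hypothetical; it only illustrates how the pin taken in pin_one_page()
 * pairs with an eventual unpin.
 */
#if 0	/* example only */
static void example_unpin_all_ppages(struct gzvm *vm)
{
	struct gzvm_pinned_page *ppage, *tmp;

	rbtree_postorder_for_each_entry_safe(ppage, tmp, &vm->pinned_pages,
					     node) {
		unpin_user_pages_dirty_lock(&ppage->page, 1, true);
		kfree(ppage);
	}
	vm->pinned_pages = RB_ROOT;
}
#endif
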
static int handle_block_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
{
	unsigned long hva;
	u64 pfn, __gfn;
	int ret, i;
	u32 nr_entries = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE;
	struct gzvm_memslot *memslot = &vm->memslot[memslot_id];
	u64 start_gfn = ALIGN_DOWN(gfn, nr_entries);
	u32 total_pages = memslot->npages;
	u64 base_gfn = memslot->base_gfn;

	/* if the demand region is less than a block, adjust the nr_entries */
	if (start_gfn + nr_entries > base_gfn + total_pages)
		nr_entries = base_gfn + total_pages - start_gfn;

	mutex_lock(&vm->demand_paging_lock);
	for (i = 0, __gfn = start_gfn; i < nr_entries; i++, __gfn++) {
		ret = gzvm_gfn_to_pfn_memslot(&vm->memslot[memslot_id], __gfn,
					      &pfn);
		if (unlikely(ret)) {
			ret = -EFAULT;
			goto err_unlock;
		}
		vm->demand_page_buffer[i] = pfn;

		hva = gzvm_gfn_to_hva_memslot(&vm->memslot[memslot_id], __gfn);
		ret = pin_one_page(vm, hva, PFN_PHYS(__gfn));
		if (ret)
			goto err_unlock;
	}

	ret = gzvm_arch_map_guest_block(vm->vm_id, memslot_id, start_gfn,
					nr_entries);
	if (unlikely(ret)) {
		ret = -EFAULT;
		goto err_unlock;
	}

err_unlock:
	mutex_unlock(&vm->demand_paging_lock);

	return ret;
}

static int handle_single_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
{
	unsigned long hva;
	int ret;
	u64 pfn;

	ret = gzvm_gfn_to_pfn_memslot(&vm->memslot[memslot_id], gfn, &pfn);
	if (unlikely(ret))
		return -EFAULT;

	ret = gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1);
	if (unlikely(ret))
		return -EFAULT;

	hva = gzvm_gfn_to_hva_memslot(&vm->memslot[memslot_id], gfn);

	return pin_one_page(vm, hva, PFN_PHYS(gfn));
}

/**
 * gzvm_handle_page_fault() - Handle guest page fault, find corresponding page
 *			      for the faulting gpa
 * @vcpu: Pointer to the struct gzvm_vcpu of the faulting vcpu
 *
 * Return:
 * * 0 - Successfully handled the guest page fault
 * * -EFAULT - Failed to map phys addr to guest's GPA
 */
int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu)
{
	struct gzvm *vm = vcpu->gzvm;
	int memslot_id;
	u64 gfn;

	gfn = PHYS_PFN(vcpu->run->exception.fault_gpa);
	memslot_id = gzvm_find_memslot(vm, gfn);
	if (unlikely(memslot_id < 0))
		return -EFAULT;

	if (vm->demand_page_gran == PAGE_SIZE)
		return handle_single_demand_page(vm, memslot_id, gfn);

	return handle_block_demand_page(vm, memslot_id, gfn);
}

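/*
 * A minimal dispatch sketch, assuming a vcpu exit handler routes stage-2
 * fault exits here. The handler name and the exit-reason symbol are
 * illustrative assumptions, not taken from this file.
 */
#if 0	/* example only */
static int example_handle_exit(struct gzvm_vcpu *vcpu, u64 exit_reason)
{
	switch (exit_reason) {
	case GZVM_EXIT_EXCEPTION:	/* assumed exit-reason name */
		/* vcpu->run->exception.fault_gpa was filled by the hypervisor */
		return gzvm_handle_page_fault(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}
#endif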