qcom-io-pgtable-alloc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/qcom_scm.h>
#include <soc/qcom/secure_buffer.h>

struct io_pgtable_pool {
	u32 vmid;
	struct kref ref;
	spinlock_t pool_lock;
	struct list_head page_pool;
};

static DEFINE_MUTEX(page_pool_xa_lock);
static DEFINE_XARRAY(page_pool_xa);
static atomic_long_t page_pool_count = ATOMIC_LONG_INIT(0);

static bool is_secure_vmid(u32 vmid)
{
	return !!vmid;
}

static int io_pgtable_hyp_assign_page(u32 vmid, struct page *page)
{
	struct qcom_scm_vmperm dst_vmids[] = {{QCOM_SCM_VMID_HLOS,
					       PERM_READ | PERM_WRITE},
					      {vmid, PERM_READ}};
	u64 src_vmid_list = BIT(QCOM_SCM_VMID_HLOS);
	phys_addr_t page_addr = page_to_phys(page);
	int ret;

	ret = qcom_scm_assign_mem(page_addr, PAGE_SIZE, &src_vmid_list,
				  dst_vmids, ARRAY_SIZE(dst_vmids));
	if (ret)
		pr_debug("failed qcom_assign for %pa address of size %zx - subsys VMid %d rc:%d\n",
			 &page_addr, PAGE_SIZE, vmid, ret);

	WARN(ret, "failed to assign memory to VMID: %u rc:%d\n", vmid, ret);
	return ret ? -EADDRNOTAVAIL : 0;
}

static int io_pgtable_hyp_unassign_page(u32 vmid, struct page *page)
{
	struct qcom_scm_vmperm dst_vmids[] = {{QCOM_SCM_VMID_HLOS,
					       PERM_READ | PERM_WRITE | PERM_EXEC}};
	u64 src_vmid_list = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid);
	phys_addr_t page_addr = page_to_phys(page);
	int ret;

	ret = qcom_scm_assign_mem(page_addr, PAGE_SIZE, &src_vmid_list,
				  dst_vmids, ARRAY_SIZE(dst_vmids));
	if (ret)
		pr_debug("failed qcom_assign for unassigning %pa address of size %zx - subsys VMid %d rc:%d\n",
			 &page_addr, PAGE_SIZE, vmid, ret);

	WARN(ret, "failed to unassign memory from VMID: %u rc: %d\n", vmid, ret);
	return ret ? -EADDRNOTAVAIL : 0;
}

static struct page *__alloc_page_from_pool(struct list_head *page_pool)
{
	struct page *page;

	page = list_first_entry_or_null(page_pool, struct page, lru);
	if (page) {
		list_del(&page->lru);
		atomic_long_dec(&page_pool_count);
		dec_node_page_state(page, NR_KERNEL_MISC_RECLAIMABLE);
	}

	return page;
}

static struct page *alloc_page_from_pool(u32 vmid)
{
	struct io_pgtable_pool *pool = xa_load(&page_pool_xa, vmid);
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	page = __alloc_page_from_pool(&pool->page_pool);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	return page;
}

static void free_page_to_pool(struct page *page)
{
	u32 vmid = page_private(page);
	struct io_pgtable_pool *pool = xa_load(&page_pool_xa, vmid);
	unsigned long flags;

	clear_page(page_address(page));
	spin_lock_irqsave(&pool->pool_lock, flags);
	list_add(&page->lru, &pool->page_pool);
	atomic_long_inc(&page_pool_count);
	inc_node_page_state(page, NR_KERNEL_MISC_RECLAIMABLE);
	spin_unlock_irqrestore(&pool->pool_lock, flags);
}

/* Assumes that page_pool_xa_lock is held. */
static void io_pgtable_pool_release(struct kref *ref)
{
	struct io_pgtable_pool *pool = container_of(ref, struct io_pgtable_pool, ref);
	struct page *page;
	bool secure_vmid = is_secure_vmid(pool->vmid);

	xa_erase(&page_pool_xa, pool->vmid);

	/*
	 * There's no need to take the pool lock, as the pool is no longer accessible to other
	 * IOMMU clients. There's no possibility for concurrent access either as this
	 * function is only invoked when the last reference is removed.
	 */
	page = __alloc_page_from_pool(&pool->page_pool);
	while (page) {
		if (!secure_vmid || !io_pgtable_hyp_unassign_page(pool->vmid, page))
			__free_page(page);

		page = __alloc_page_from_pool(&pool->page_pool);
	}

	kfree(pool);
}

/*
 * qcom_io_pgtable_allocator_register: Register with the io-pgtable allocator interface.
 *
 * @vmid: The VMID that io-pgtable memory needs to be shared with when allocated. If VMID
 *        is 0, then page table memory will not be shared with any other VMs.
 *
 * On success, 0 is returned and there will be a reference held for metadata associated with
 * @vmid. Otherwise, an error code will be returned.
 */
int qcom_io_pgtable_allocator_register(u32 vmid)
{
	struct io_pgtable_pool *pool;
	int ret = 0;

	mutex_lock(&page_pool_xa_lock);
	pool = xa_load(&page_pool_xa, vmid);
	if (pool) {
		kref_get(&pool->ref);
		goto out;
	}

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		ret = -ENOMEM;
		goto out;
	}

	pool->vmid = vmid;
	kref_init(&pool->ref);
	spin_lock_init(&pool->pool_lock);
	INIT_LIST_HEAD(&pool->page_pool);

	ret = xa_err(xa_store(&page_pool_xa, vmid, pool, GFP_KERNEL));
	if (ret < 0)
		kfree(pool);
out:
	mutex_unlock(&page_pool_xa_lock);

	return ret;
}

/*
 * qcom_io_pgtable_allocator_unregister: Unregister with the io-pgtable allocator interface.
 *
 * @vmid: The VMID that was used when registering with the interface with
 *        qcom_io_pgtable_allocator_register().
 *
 * Decrements the references to allocator metadata for @vmid.
 *
 * If this call results in references to @vmid dropping to 0, then all metadata and pages
 * associated with @vmid are released.
 */
void qcom_io_pgtable_allocator_unregister(u32 vmid)
{
	struct io_pgtable_pool *pool;

	mutex_lock(&page_pool_xa_lock);
	pool = xa_load(&page_pool_xa, vmid);
	kref_put(&pool->ref, io_pgtable_pool_release);
	mutex_unlock(&page_pool_xa_lock);
}
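
/*
 * Illustrative sketch (not part of the original driver): a hypothetical IOMMU
 * client pairing qcom_io_pgtable_allocator_register() and
 * qcom_io_pgtable_allocator_unregister() around the lifetime of a domain.
 * The example_* names are assumptions made purely for illustration.
 */
#if 0	/* example only; not built */
static int example_domain_init(u32 vmid)
{
	/* Takes (or creates) a reference on the per-VMID pool metadata. */
	return qcom_io_pgtable_allocator_register(vmid);
}

static void example_domain_destroy(u32 vmid)
{
	/* Drops the reference; the pool and its cached pages go away at zero. */
	qcom_io_pgtable_allocator_unregister(vmid);
}
#endif
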
/*
 * qcom_io_pgtable_alloc_page: Allocate page table memory from the io-pgtable allocator.
 *
 * @vmid: The VMID that the page table memory should be shared with.
 * @gfp: The GFP flags to be used for allocating the page table memory.
 *
 * This function may sleep if memory needs to be shared with other VMs.
 *
 * On success, a page will be returned. The page will also have been shared with other
 * VMs--if any. In case of an error, this function returns NULL.
 */
struct page *qcom_io_pgtable_alloc_page(u32 vmid, gfp_t gfp)
{
	struct page *page;

	/*
	 * Mapping memory for secure domains may result in having to assign page table
	 * memory to another VMID, which can sleep. Atomic and secure domains are
	 * not a legal combination. We can use the GFP flags to detect atomic domains,
	 * as they will have GFP_ATOMIC set.
	 */
	BUG_ON(!gfpflags_allow_blocking(gfp) && is_secure_vmid(vmid));

	page = alloc_page_from_pool(vmid);
	if (page)
		return page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;
	/* The page may be inaccessible if this is true, so leak it. */
	else if (is_secure_vmid(vmid) && io_pgtable_hyp_assign_page(vmid, page))
		return NULL;

	set_page_private(page, (unsigned long)vmid);

	return page;
}

/*
 * qcom_io_pgtable_free_page: Frees page table memory.
 *
 * @page: The page to be freed.
 *
 * We cache pages in their respective page pools to improve performance
 * for future allocations.
 *
 * Export this symbol for the IOMMU driver, since it decides when
 * page table memory is freed after TLB maintenance.
 */
void qcom_io_pgtable_free_page(struct page *page)
{
	free_page_to_pool(page);
}
EXPORT_SYMBOL(qcom_io_pgtable_free_page);
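
/*
 * Illustrative sketch (not part of the original driver): allocating one page of
 * page table memory and returning it to the pool. A secure (non-zero) VMID
 * requires a blocking GFP mask because the page may have to be hyp-assigned.
 * The example_* name is an assumption made purely for illustration.
 */
#if 0	/* example only; not built */
static int example_use_table_page(u32 vmid)
{
	struct page *p;

	/* GFP_KERNEL allows blocking, which a secure VMID requires. */
	p = qcom_io_pgtable_alloc_page(vmid, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	/* ... populate page_address(p) as a table, map it, do TLB maintenance ... */

	/* Return the page to the per-VMID pool rather than the page allocator. */
	qcom_io_pgtable_free_page(p);
	return 0;
}
#endif
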
static unsigned long io_pgtable_alloc_count_objects(struct shrinker *shrinker,
						    struct shrink_control *sc)
{
	unsigned long count = atomic_long_read(&page_pool_count);

	return count ? count : SHRINK_EMPTY;
}

static unsigned long scan_page_pool(struct io_pgtable_pool *pool, struct list_head *freelist,
				    unsigned long nr_to_scan)
{
	struct page *page;
	unsigned long count = 0, flags;

	spin_lock_irqsave(&pool->pool_lock, flags);
	while (count < nr_to_scan) {
		page = __alloc_page_from_pool(&pool->page_pool);
		if (page) {
			list_add(&page->lru, freelist);
			count++;
		} else {
			break;
		}
	}
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	return count;
}

static unsigned long io_pgtable_alloc_scan_objects(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	struct page *page, *tmp;
	struct io_pgtable_pool *pool;
	unsigned long index;
	unsigned long nr_to_scan = sc->nr_to_scan, count = 0;
	u32 vmid;
	LIST_HEAD(freelist);

	mutex_lock(&page_pool_xa_lock);
	xa_for_each(&page_pool_xa, index, pool) {
		count += scan_page_pool(pool, &freelist, nr_to_scan - count);
		if (count >= nr_to_scan)
			break;
	}
	mutex_unlock(&page_pool_xa_lock);

	list_for_each_entry_safe(page, tmp, &freelist, lru) {
		vmid = page_private(page);
		list_del(&page->lru);
		if (!is_secure_vmid(vmid) || !io_pgtable_hyp_unassign_page(vmid, page))
			__free_page(page);
		else
			count--;
	}

	return count;
}

static struct shrinker io_pgtable_alloc_shrinker = {
	.count_objects = io_pgtable_alloc_count_objects,
	.scan_objects = io_pgtable_alloc_scan_objects,
	.seeks = DEFAULT_SEEKS,
};

int qcom_io_pgtable_alloc_init(void)
{
	return register_shrinker(&io_pgtable_alloc_shrinker, "io_pgtable_alloc");
}

void qcom_io_pgtable_alloc_exit(void)
{
	unregister_shrinker(&io_pgtable_alloc_shrinker);
}
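
/*
 * Illustrative sketch (not part of the original driver): how a hypothetical
 * module init/exit path might call qcom_io_pgtable_alloc_init() and
 * qcom_io_pgtable_alloc_exit() to register and unregister the
 * "io_pgtable_alloc" shrinker. The example_* names are assumptions.
 */
#if 0	/* example only; not built */
static int __init example_iommu_init(void)
{
	int ret;

	/* Make cached page-pool pages reclaimable under memory pressure. */
	ret = qcom_io_pgtable_alloc_init();
	if (ret)
		return ret;

	/* ... register IOMMU ops, platform drivers, etc. ... */
	return 0;
}

static void __exit example_iommu_exit(void)
{
	/* ... tear down IOMMU ops ... */
	qcom_io_pgtable_alloc_exit();
}

module_init(example_iommu_init);
module_exit(example_iommu_exit);
#endif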