qti-smmu-proxy-pvm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include "qti-smmu-proxy-common.h"
#include <linux/qti-smmu-proxy-callbacks.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/of.h>

static void *msgq_hdl;

DEFINE_MUTEX(sender_mutex);

static const struct file_operations smmu_proxy_dev_fops;
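
/*
 * Tear down the remote VM's mapping of a DMA-BUF. Installed by
 * smmu_proxy_map() as the buffer's dma-buf destructor, so @data is the
 * struct dma_buf pointer. Sends an SMMU_PROXY_UNMAP request carrying the
 * buffer's memparcel handle over the Gunyah message queue and checks the
 * response from the remote VM.
 */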
int smmu_proxy_unmap(void *data)
{
	struct dma_buf *dmabuf;
	void *buf;
	size_t size;
	int ret;
	struct smmu_proxy_unmap_req *req;
	struct smmu_proxy_unmap_resp *resp;

	mutex_lock(&sender_mutex);

	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		pr_err("%s: Failed to allocate memory!\n", __func__);
		goto out;
	}

	req = buf;
	dmabuf = data;

	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &req->hdl);
	if (ret) {
		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
		goto free_buf;
	}

	req->hdr.msg_type = SMMU_PROXY_UNMAP;
	req->hdr.msg_size = sizeof(*req);

	ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
	if (ret < 0) {
		pr_err("%s: failed to send message rc: %d\n", __func__, ret);
		goto free_buf;
	}

	/*
	 * No need to validate size - gh_msgq_recv() ensures that sizeof(*resp) <
	 * GH_MSGQ_MAX_MSG_SIZE_BYTES
	 */
	ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
	if (ret < 0) {
		pr_err_ratelimited("%s: failed to receive message rc: %d\n", __func__, ret);
		goto free_buf;
	}

	resp = buf;
	if (resp->hdr.ret) {
		ret = resp->hdr.ret;
		pr_err("%s: Unmap call failed on remote VM, rc: %d\n", __func__,
		       resp->hdr.ret);
	}

free_buf:
	kfree(buf);
out:
	mutex_unlock(&sender_mutex);

	return ret;
}
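
/*
 * Map a DMA-BUF on the remote VM on behalf of @client_dev. If this VM still
 * owns the buffer exclusively, it is first lent to the TVM (and, on CSF
 * 2.5.1, the OEM VM). An SMMU_PROXY_MAP request is then sent with the
 * memparcel handle and the context bank ID read from the client's
 * "qti,smmu-proxy-cb-id" DT property. On success the returned IOVA and
 * mapping length are written into the single-entry @proxy_iova table and
 * smmu_proxy_unmap() is registered as the buffer's destructor.
 */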
int smmu_proxy_map(struct device *client_dev, struct sg_table *proxy_iova,
		   struct dma_buf *dmabuf)
{
	void *buf;
	size_t size;
	int ret = 0;
	int n_acl_entries, i;
	int vmids[2] = { VMID_TVM, VMID_OEMVM };
	int perms[2] = { PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE };
	struct csf_version csf_version;
	struct mem_buf_lend_kernel_arg arg = {0};
	struct smmu_proxy_map_req *req;
	struct smmu_proxy_map_resp *resp;

	ret = smmu_proxy_get_csf_version(&csf_version);
	if (ret)
		return ret;

	/*
	 * We enter this function iff the CSF version is 2.5.* . If CSF 2.5.1
	 * is in use, we set n_acl_entries to two, in order to assign this
	 * memory to the TVM and OEM VM. If CSF 2.5.0 is in use, we just assign
	 * it to the TVM.
	 */
	n_acl_entries = csf_version.min_ver == 1 ? 2 : 1;

	mutex_lock(&sender_mutex);

	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	if (mem_buf_dma_buf_exclusive_owner(dmabuf)) {
		arg.vmids = vmids;
		arg.perms = perms;
		arg.nr_acl_entries = n_acl_entries;

		ret = mem_buf_lend(dmabuf, &arg);
		if (ret) {
			pr_err("%s: Failed to lend buf rc: %d\n", __func__, ret);
			goto free_buf;
		}
	}

	/* Prepare the message */
	req = buf;
	req->acl_desc.n_acl_entries = n_acl_entries;
	for (i = 0; i < n_acl_entries; i++) {
		req->acl_desc.acl_entries[i].vmid = vmids[i];
		req->acl_desc.acl_entries[i].perms = perms[i];
	}

	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &req->hdl);
	if (ret) {
		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
		goto free_buf;
	}

	ret = of_property_read_u32(client_dev->of_node, "qti,smmu-proxy-cb-id",
				   &req->cb_id);
	if (ret) {
		dev_err(client_dev, "%s: Err reading 'qti,smmu-proxy-cb-id' rc: %d\n",
			__func__, ret);
		goto free_buf;
	}

	req->hdr.msg_type = SMMU_PROXY_MAP;
	req->hdr.msg_size = offsetof(struct smmu_proxy_map_req,
				     acl_desc.acl_entries[n_acl_entries]);

	ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
	if (ret < 0) {
		pr_err("%s: failed to send message rc: %d\n", __func__, ret);
		goto free_buf;
	}

	/*
	 * No need to validate size - gh_msgq_recv() ensures that sizeof(*resp) <
	 * GH_MSGQ_MAX_MSG_SIZE_BYTES
	 */
	ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
	if (ret < 0) {
		pr_err_ratelimited("%s: failed to receive message rc: %d\n", __func__, ret);
		goto free_buf;
	}

	resp = buf;
	if (resp->hdr.ret) {
		ret = resp->hdr.ret;
		pr_err_ratelimited("%s: Map call failed on remote VM, rc: %d\n", __func__,
				   resp->hdr.ret);
		goto free_buf;
	}

	ret = mem_buf_dma_buf_set_destructor(dmabuf, smmu_proxy_unmap, dmabuf);
	if (ret) {
		pr_err_ratelimited("%s: Failed to set vmperm destructor, rc: %d\n",
				   __func__, ret);
		goto free_buf;
	}

	sg_dma_address(proxy_iova->sgl) = resp->iova;
	sg_dma_len(proxy_iova->sgl) = resp->mapping_len;
	/*
	 * We set the number of entries to one here, as we only allow the mapping to go
	 * through on the TVM if the sg_table returned by dma_buf_map_attachment has one
	 * entry.
	 */
	proxy_iova->nents = 1;

free_buf:
	kfree(buf);
out:
	mutex_unlock(&sender_mutex);

	return ret;
}
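
/*
 * Client unmap callback registered for CSF 2.5. Intentionally a no-op:
 * the actual unmap is driven by the dma-buf destructor (smmu_proxy_unmap())
 * installed in smmu_proxy_map().
 */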
void smmu_proxy_unmap_nop(struct device *client_dev, struct sg_table *table,
			  struct dma_buf *dmabuf)
{
}
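
/*
 * Character device ioctl handler. Only QTI_SMMU_PROXY_GET_VERSION_IOCTL is
 * supported; it reports the CSF version to userspace.
 */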
static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	unsigned int dir = _IOC_DIR(cmd);
	union smmu_proxy_ioctl_arg ioctl_arg;
	int ret;

	if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
		return -EINVAL;

	if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	if (!(dir & _IOC_WRITE))
		memset(&ioctl_arg, 0, sizeof(ioctl_arg));

	switch (cmd) {
	case QTI_SMMU_PROXY_GET_VERSION_IOCTL:
	{
		struct csf_version *csf_version = &ioctl_arg.csf_version;

		ret = smmu_proxy_get_csf_version(csf_version);
		if (ret)
			return ret;

		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &ioctl_arg,
				 _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return 0;
}
static const struct file_operations smmu_proxy_dev_fops = {
	.unlocked_ioctl = smmu_proxy_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
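
/*
 * Probe: register the Gunyah message queue used to reach the receiver on the
 * remote VM, select the map/unmap callbacks based on the CSF version, and
 * create the userspace character device.
 */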
static int sender_probe_handler(struct platform_device *pdev)
{
	int ret;
	struct csf_version csf_version;

	msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
	if (IS_ERR(msgq_hdl)) {
		ret = PTR_ERR(msgq_hdl);
		pr_err("%s: Queue registration failed rc: %ld!\n", __func__, PTR_ERR(msgq_hdl));
		return ret;
	}

	ret = smmu_proxy_get_csf_version(&csf_version);
	if (ret) {
		pr_err("%s: Failed to get CSF version rc: %d\n", __func__, ret);
		goto free_msgq;
	}

	if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
		ret = qti_smmu_proxy_register_callbacks(NULL, NULL);
	} else if (csf_version.arch_ver == 2 && csf_version.max_ver == 5) {
		ret = qti_smmu_proxy_register_callbacks(smmu_proxy_map, smmu_proxy_unmap_nop);
	} else {
		pr_err("%s: Invalid CSF version: %d.%d\n", __func__, csf_version.arch_ver,
		       csf_version.max_ver);
		ret = -EINVAL;
		goto free_msgq;
	}

	if (ret) {
		pr_err("%s: Failed to set SMMU proxy callbacks rc: %d\n", __func__, ret);
		goto free_msgq;
	}

	ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
	if (ret) {
		pr_err("%s: Failed to create character device rc: %d\n", __func__,
		       ret);
		goto set_callbacks_null;
	}

	return 0;

set_callbacks_null:
	qti_smmu_proxy_register_callbacks(NULL, NULL);
free_msgq:
	gh_msgq_unregister(msgq_hdl);
	return ret;
}
static const struct of_device_id smmu_proxy_match_table[] = {
	{.compatible = "smmu-proxy-sender"},
	{},
};

static struct platform_driver smmu_proxy_driver = {
	.probe = sender_probe_handler,
	.driver = {
		.name = "qti-smmu-proxy",
		.of_match_table = smmu_proxy_match_table,
	},
};

int __init init_smmu_proxy_driver(void)
{
	return platform_driver_register(&smmu_proxy_driver);
}
module_init(init_smmu_proxy_driver);

MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");