// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/cdev.h>
#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mem-buf.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "mem-buf-gh.h"
#include "mem-buf-ids.h"
  17. #define MEM_BUF_MAX_DEVS 1
  18. static dev_t mem_buf_dev_no;
  19. static struct class *mem_buf_class;
  20. static struct cdev mem_buf_char_dev;
  21. union mem_buf_ioctl_arg {
  22. struct mem_buf_alloc_ioctl_arg allocation;
  23. struct mem_buf_lend_ioctl_arg lend;
  24. struct mem_buf_retrieve_ioctl_arg retrieve;
  25. struct mem_buf_reclaim_ioctl_arg reclaim;
  26. struct mem_buf_share_ioctl_arg share;
  27. struct mem_buf_exclusive_owner_ioctl_arg get_ownership;
  28. struct mem_buf_get_memparcel_hdl_ioctl_arg get_memparcel_hdl;
  29. };
  30. static bool is_valid_mem_buf_perms(u32 mem_buf_perms)
  31. {
  32. if (mem_buf_perms & ~MEM_BUF_PERM_VALID_FLAGS) {
  33. pr_err_ratelimited("%s: Invalid mem-buf permissions detected\n",
  34. __func__);
  35. return false;
  36. }
  37. return true;
  38. }
  39. static int mem_buf_perms_to_perms(u32 mem_buf_perms)
  40. {
  41. int perms = 0;
  42. if (!is_valid_mem_buf_perms(mem_buf_perms))
  43. return -EINVAL;
  44. if (mem_buf_perms & MEM_BUF_PERM_FLAG_READ)
  45. perms |= PERM_READ;
  46. if (mem_buf_perms & MEM_BUF_PERM_FLAG_WRITE)
  47. perms |= PERM_WRITE;
  48. if (mem_buf_perms & MEM_BUF_PERM_FLAG_EXEC)
  49. perms |= PERM_EXEC;
  50. return perms;
  51. }
  52. int mem_buf_acl_to_vmid_perms_list(unsigned int nr_acl_entries, const void __user *acl_entries,
  53. int **dst_vmids, int **dst_perms)
  54. {
  55. int ret, i, *vmids, *perms;
  56. struct acl_entry entry;
  57. if (!nr_acl_entries || !acl_entries)
  58. return -EINVAL;
  59. vmids = kmalloc_array(nr_acl_entries, sizeof(*vmids), GFP_KERNEL);
  60. if (!vmids)
  61. return -ENOMEM;
  62. perms = kmalloc_array(nr_acl_entries, sizeof(*perms), GFP_KERNEL);
  63. if (!perms) {
  64. kfree(vmids);
  65. return -ENOMEM;
  66. }
  67. for (i = 0; i < nr_acl_entries; i++) {
  68. ret = copy_struct_from_user(&entry, sizeof(entry),
  69. acl_entries + (sizeof(entry) * i),
  70. sizeof(entry));
  71. if (ret < 0)
  72. goto out;
  73. vmids[i] = mem_buf_fd_to_vmid(entry.vmid);
  74. perms[i] = mem_buf_perms_to_perms(entry.perms);
  75. if (vmids[i] < 0 || perms[i] < 0) {
  76. ret = -EINVAL;
  77. goto out;
  78. }
  79. }
  80. *dst_vmids = vmids;
  81. *dst_perms = perms;
  82. return ret;
  83. out:
  84. kfree(perms);
  85. kfree(vmids);
  86. return ret;
  87. }
  88. static int mem_buf_lend_user(struct mem_buf_lend_ioctl_arg *uarg, bool is_lend)
  89. {
  90. int *vmids, *perms;
  91. int ret;
  92. struct dma_buf *dmabuf;
  93. struct mem_buf_lend_kernel_arg karg = {0};
  94. if (!uarg->nr_acl_entries || !uarg->acl_list ||
  95. uarg->nr_acl_entries > MEM_BUF_MAX_NR_ACL_ENTS ||
  96. uarg->reserved0 || uarg->reserved1 || uarg->reserved2)
  97. return -EINVAL;
  98. dmabuf = dma_buf_get(uarg->dma_buf_fd);
  99. if (IS_ERR(dmabuf))
  100. return PTR_ERR(dmabuf);
  101. ret = mem_buf_acl_to_vmid_perms_list(uarg->nr_acl_entries,
  102. (void *)uarg->acl_list, &vmids, &perms);
  103. if (ret)
  104. goto err_acl;
  105. karg.nr_acl_entries = uarg->nr_acl_entries;
  106. karg.vmids = vmids;
  107. karg.perms = perms;
  108. if (is_lend) {
  109. ret = mem_buf_lend(dmabuf, &karg);
  110. if (ret)
  111. goto err_lend;
  112. } else {
  113. ret = mem_buf_share(dmabuf, &karg);
  114. if (ret)
  115. goto err_lend;
  116. }
  117. uarg->memparcel_hdl = karg.memparcel_hdl;
  118. err_lend:
  119. kfree(perms);
  120. kfree(vmids);
  121. err_acl:
  122. dma_buf_put(dmabuf);
  123. return ret;
  124. }
  125. static int mem_buf_reclaim_user(struct mem_buf_reclaim_ioctl_arg *uarg)
  126. {
  127. struct dma_buf *dmabuf;
  128. int ret;
  129. if (uarg->reserved0 || uarg->reserved1 || uarg->reserved2)
  130. return -EINVAL;
  131. dmabuf = dma_buf_get(uarg->dma_buf_fd);
  132. if (IS_ERR(dmabuf))
  133. return PTR_ERR(dmabuf);
  134. ret = mem_buf_reclaim(dmabuf);
  135. dma_buf_put(dmabuf);
  136. return ret;
  137. }
  138. static int mem_buf_get_exclusive_ownership(struct mem_buf_exclusive_owner_ioctl_arg *uarg)
  139. {
  140. struct dma_buf *dmabuf;
  141. int ret = 0;
  142. dmabuf = dma_buf_get(uarg->dma_buf_fd);
  143. if (IS_ERR(dmabuf))
  144. return PTR_ERR(dmabuf);
  145. if (IS_ERR(to_mem_buf_vmperm(dmabuf))) {
  146. ret = -EINVAL;
  147. goto put_dma_buf;
  148. }
  149. uarg->is_exclusive_owner = mem_buf_dma_buf_exclusive_owner(dmabuf);
  150. put_dma_buf:
  151. dma_buf_put(dmabuf);
  152. return ret;
  153. }
  154. static int mem_buf_get_memparcel_hdl(struct mem_buf_get_memparcel_hdl_ioctl_arg *uarg)
  155. {
  156. struct dma_buf *dmabuf;
  157. int ret = 0;
  158. gh_memparcel_handle_t memparcel_hdl;
  159. dmabuf = dma_buf_get(uarg->dma_buf_fd);
  160. if (IS_ERR(dmabuf))
  161. return PTR_ERR(dmabuf);
  162. ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &memparcel_hdl);
  163. if (ret) {
  164. ret = -EINVAL;
  165. goto put_dma_buf;
  166. }
  167. uarg->memparcel_hdl = memparcel_hdl;
  168. put_dma_buf:
  169. dma_buf_put(dmabuf);
  170. return ret;
  171. }
/*
 * ioctl entry point for /dev/membuf. Copies the fixed-size argument
 * struct in, dispatches on the command, then copies the (possibly
 * updated) struct back out for _IOC_READ commands.
 */
static long mem_buf_dev_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg)
{
	int fd;
	unsigned int dir = _IOC_DIR(cmd);
	union mem_buf_ioctl_arg ioctl_arg;

	/* The union must be able to hold the command's argument struct. */
	if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
		return -EINVAL;
	if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;
	/*
	 * For read-only commands the user payload is not an input: discard
	 * it so handlers never act on caller-controlled garbage.
	 */
	if (!(dir & _IOC_WRITE))
		memset(&ioctl_arg, 0, sizeof(ioctl_arg));
	switch (cmd) {
	case MEM_BUF_IOC_ALLOC:
	{
		struct mem_buf_alloc_ioctl_arg *allocation =
			&ioctl_arg.allocation;

		/* Allocation requires this VM to be a mem-buf consumer. */
		if (!(mem_buf_capability & MEM_BUF_CAP_CONSUMER))
			return -EOPNOTSUPP;
		fd = mem_buf_alloc_fd(allocation);
		if (fd < 0)
			return fd;
		/* Hand the new dma-buf fd back to user space. */
		allocation->mem_buf_fd = fd;
		break;
	}
	case MEM_BUF_IOC_LEND:
	{
		struct mem_buf_lend_ioctl_arg *lend = &ioctl_arg.lend;
		int ret;

		ret = mem_buf_lend_user(lend, true);
		if (ret)
			return ret;
		break;
	}
	case MEM_BUF_IOC_RETRIEVE:
	{
		struct mem_buf_retrieve_ioctl_arg *retrieve =
			&ioctl_arg.retrieve;
		int ret;

		ret = mem_buf_retrieve_user(retrieve);
		if (ret)
			return ret;
		break;
	}
	case MEM_BUF_IOC_RECLAIM:
	{
		struct mem_buf_reclaim_ioctl_arg *reclaim =
			&ioctl_arg.reclaim;
		int ret;

		ret = mem_buf_reclaim_user(reclaim);
		if (ret)
			return ret;
		break;
	}
	case MEM_BUF_IOC_SHARE:
	{
		struct mem_buf_share_ioctl_arg *share = &ioctl_arg.share;
		int ret;

		/* The two formats are currently identical */
		ret = mem_buf_lend_user((struct mem_buf_lend_ioctl_arg *)share,
					false);
		if (ret)
			return ret;
		break;
	}
	case MEM_BUF_IOC_EXCLUSIVE_OWNER:
	{
		struct mem_buf_exclusive_owner_ioctl_arg *get_ownership = &ioctl_arg.get_ownership;
		int ret;

		ret = mem_buf_get_exclusive_ownership(get_ownership);
		if (ret)
			return ret;
		break;
	}
	case MEM_BUF_IOC_GET_MEMPARCEL_HDL:
	{
		struct mem_buf_get_memparcel_hdl_ioctl_arg *get_memparcel_hdl;
		int ret;

		get_memparcel_hdl = &ioctl_arg.get_memparcel_hdl;
		ret = mem_buf_get_memparcel_hdl(get_memparcel_hdl);
		if (ret)
			return ret;
		break;
	}
	default:
		return -ENOTTY;
	}

	/* Copy results back for commands that read from the kernel. */
	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &ioctl_arg,
				 _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return 0;
}
/*
 * File operations for /dev/membuf. All ioctl arguments are fixed-size
 * structs, so compat_ptr_ioctl lets 32-bit user space reuse the native
 * handler unchanged.
 */
static const struct file_operations mem_buf_dev_fops = {
	.unlocked_ioctl = mem_buf_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
  270. static int mem_buf_msgq_probe(struct platform_device *pdev)
  271. {
  272. int ret;
  273. struct device *dev = &pdev->dev;
  274. struct device *class_dev;
  275. if (!mem_buf_dev)
  276. return -EPROBE_DEFER;
  277. ret = mem_buf_msgq_alloc(dev);
  278. if (ret)
  279. return ret;
  280. cdev_init(&mem_buf_char_dev, &mem_buf_dev_fops);
  281. ret = cdev_add(&mem_buf_char_dev, mem_buf_dev_no, MEM_BUF_MAX_DEVS);
  282. if (ret < 0)
  283. goto err_cdev_add;
  284. class_dev = device_create(mem_buf_class, NULL, mem_buf_dev_no, NULL,
  285. "membuf");
  286. if (IS_ERR(class_dev)) {
  287. ret = PTR_ERR(class_dev);
  288. goto err_dev_create;
  289. }
  290. return 0;
  291. err_dev_create:
  292. cdev_del(&mem_buf_char_dev);
  293. err_cdev_add:
  294. mem_buf_msgq_free(dev);
  295. return ret;
  296. }
  297. static int mem_buf_msgq_remove(struct platform_device *pdev)
  298. {
  299. device_destroy(mem_buf_class, mem_buf_dev_no);
  300. cdev_del(&mem_buf_char_dev);
  301. mem_buf_msgq_free(&pdev->dev);
  302. return 0;
  303. }
  304. static const struct of_device_id mem_buf_msgq_match_tbl[] = {
  305. {.compatible = "qcom,mem-buf-msgq"},
  306. {},
  307. };
  308. static struct platform_driver mem_buf_msgq_driver = {
  309. .probe = mem_buf_msgq_probe,
  310. .remove = mem_buf_msgq_remove,
  311. .driver = {
  312. .name = "mem-buf-msgq",
  313. .of_match_table = of_match_ptr(mem_buf_msgq_match_tbl),
  314. },
  315. };
  316. static int __init mem_buf_init(void)
  317. {
  318. int ret;
  319. ret = alloc_chrdev_region(&mem_buf_dev_no, 0, MEM_BUF_MAX_DEVS,
  320. "membuf");
  321. if (ret < 0)
  322. goto err_chrdev_region;
  323. mem_buf_class = class_create(THIS_MODULE, "membuf");
  324. if (IS_ERR(mem_buf_class)) {
  325. ret = PTR_ERR(mem_buf_class);
  326. goto err_class_create;
  327. }
  328. ret = platform_driver_register(&mem_buf_msgq_driver);
  329. if (ret < 0)
  330. goto err_platform_drvr_register;
  331. return 0;
  332. err_platform_drvr_register:
  333. class_destroy(mem_buf_class);
  334. err_class_create:
  335. unregister_chrdev_region(mem_buf_dev_no, MEM_BUF_MAX_DEVS);
  336. err_chrdev_region:
  337. return ret;
  338. }
  339. module_init(mem_buf_init);
  340. static void __exit mem_buf_exit(void)
  341. {
  342. platform_driver_unregister(&mem_buf_msgq_driver);
  343. class_destroy(mem_buf_class);
  344. unregister_chrdev_region(mem_buf_dev_no, MEM_BUF_MAX_DEVS);
  345. }
  346. module_exit(mem_buf_exit);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Memory Buffer Sharing driver");
MODULE_LICENSE("GPL");
/* dma_buf_get()/dma_buf_put() are exported in the DMA_BUF symbol namespace. */
MODULE_IMPORT_NS(DMA_BUF);