udmabuf.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");
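
/*
 * Driver-private data for one exported buffer: the array of pinned pages
 * backing the dma-buf, an optional cached scatter-gather table used for
 * CPU access, and the misc device that serves as the DMA mapping device.
 */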
struct udmabuf {
        pgoff_t pagecount;
        struct page **pages;
        struct sg_table *sg;
        struct miscdevice *device;
};
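
/*
 * Page fault handler for CPU mmap of the dma-buf: look up the backing page
 * for the faulting offset and hand it back with an extra reference.
 */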
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;
        pgoff_t pgoff = vmf->pgoff;

        if (pgoff >= ubuf->pagecount)
                return VM_FAULT_SIGBUS;
        vmf->page = ubuf->pages[pgoff];
        get_page(vmf->page);
        return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
        .fault = udmabuf_vm_fault,
};
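
/* mmap is only allowed for shared mappings; faults are served from ubuf->pages. */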
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct udmabuf *ubuf = buf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &udmabuf_vm_ops;
        vma->vm_private_data = ubuf;
        return 0;
}
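
/*
 * Build a scatter-gather table covering all pinned pages and map it for DMA
 * to the given device.  On failure the partially constructed table is freed
 * and an ERR_PTR is returned.
 */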
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
                                     enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct sg_table *sg;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
                                        0, ubuf->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret < 0)
                goto err;
        ret = dma_map_sgtable(dev, sg, direction, 0);
        if (ret < 0)
                goto err;
        return sg;

err:
        sg_free_table(sg);
        kfree(sg);
        return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
                         enum dma_data_direction direction)
{
        dma_unmap_sgtable(dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
}
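
/* dma_buf_ops map/unmap callbacks: one sg_table per attachment. */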
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
                                    enum dma_data_direction direction)
{
        return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
                          struct sg_table *sg,
                          enum dma_data_direction direction)
{
        return put_sg_table(at->dev, sg, direction);
}
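
/*
 * Final dma-buf release: drop the cached CPU-access mapping (if any), then
 * release the page references taken at creation time and free the metadata.
 */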
static void release_udmabuf(struct dma_buf *buf)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        pgoff_t pg;

        if (ubuf->sg)
                put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

        for (pg = 0; pg < ubuf->pagecount; pg++)
                put_page(ubuf->pages[pg]);
        kfree(ubuf->pages);
        kfree(ubuf);
}
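
/*
 * CPU access bracketing: the first begin_cpu_access lazily maps the buffer
 * for the misc device and caches the sg_table in ubuf->sg; later calls only
 * synchronize the existing mapping for CPU or device access.
 */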
static int begin_cpu_udmabuf(struct dma_buf *buf,
                             enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        int ret = 0;

        if (!ubuf->sg) {
                ubuf->sg = get_sg_table(dev, buf, direction);
                if (IS_ERR(ubuf->sg)) {
                        ret = PTR_ERR(ubuf->sg);
                        ubuf->sg = NULL;
                }
        } else {
                dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
                                    direction);
        }

        return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
                           enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (!ubuf->sg)
                return -EINVAL;

        dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
        return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
        .cache_sgt_mapping = true,
        .map_dma_buf       = map_udmabuf,
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
        .mmap              = mmap_udmabuf,
        .begin_cpu_access  = begin_cpu_udmabuf,
        .end_cpu_access    = end_cpu_udmabuf,
};

#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
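
/*
 * Core creation path: validate each memfd (must be shmem or hugetlbfs,
 * sealed with F_SEAL_SHRINK and not F_SEAL_WRITE), pin the requested
 * page-aligned ranges, and export them as a single dma-buf.
 */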
static long udmabuf_create(struct miscdevice *device,
                           struct udmabuf_create_list *head,
                           struct udmabuf_create_item *list)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct file *memfd = NULL;
        struct address_space *mapping = NULL;
        struct udmabuf *ubuf;
        struct dma_buf *buf;
        pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
        struct page *page, *hpage = NULL;
        pgoff_t subpgoff, maxsubpgs;
        struct hstate *hpstate;
        int seals, ret = -EINVAL;
        u32 i, flags;

        ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
        if (!ubuf)
                return -ENOMEM;

        pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
        for (i = 0; i < head->count; i++) {
                if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
                        goto err;
                if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
                        goto err;
                ubuf->pagecount += list[i].size >> PAGE_SHIFT;
                if (ubuf->pagecount > pglimit)
                        goto err;
        }

        if (!ubuf->pagecount)
                goto err;

        ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
                                    GFP_KERNEL);
        if (!ubuf->pages) {
                ret = -ENOMEM;
                goto err;
        }

        pgbuf = 0;
        for (i = 0; i < head->count; i++) {
                ret = -EBADFD;
                memfd = fget(list[i].memfd);
                if (!memfd)
                        goto err;
                mapping = memfd->f_mapping;
                if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
                        goto err;
                seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
                if (seals == -EINVAL)
                        goto err;
                ret = -EINVAL;
                if ((seals & SEALS_WANTED) != SEALS_WANTED ||
                    (seals & SEALS_DENIED) != 0)
                        goto err;
                pgoff = list[i].offset >> PAGE_SHIFT;
                pgcnt = list[i].size >> PAGE_SHIFT;
                if (is_file_hugepages(memfd)) {
                        hpstate = hstate_file(memfd);
                        pgoff = list[i].offset >> huge_page_shift(hpstate);
                        subpgoff = (list[i].offset &
                                    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
                        maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
                }
                for (pgidx = 0; pgidx < pgcnt; pgidx++) {
                        if (is_file_hugepages(memfd)) {
                                if (!hpage) {
                                        hpage = find_get_page_flags(mapping, pgoff,
                                                                    FGP_ACCESSED);
                                        if (!hpage) {
                                                ret = -EINVAL;
                                                goto err;
                                        }
                                }
                                page = hpage + subpgoff;
                                get_page(page);
                                subpgoff++;
                                if (subpgoff == maxsubpgs) {
                                        put_page(hpage);
                                        hpage = NULL;
                                        subpgoff = 0;
                                        pgoff++;
                                }
                        } else {
                                page = shmem_read_mapping_page(mapping,
                                                               pgoff + pgidx);
                                if (IS_ERR(page)) {
                                        ret = PTR_ERR(page);
                                        goto err;
                                }
                        }
                        ubuf->pages[pgbuf++] = page;
                }
                fput(memfd);
                memfd = NULL;
                if (hpage) {
                        put_page(hpage);
                        hpage = NULL;
                }
        }

        exp_info.ops  = &udmabuf_ops;
        exp_info.size = ubuf->pagecount << PAGE_SHIFT;
        exp_info.priv = ubuf;
        exp_info.flags = O_RDWR;

        ubuf->device = device;
        buf = dma_buf_export(&exp_info);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto err;
        }

        flags = 0;
        if (head->flags & UDMABUF_FLAGS_CLOEXEC)
                flags |= O_CLOEXEC;
        return dma_buf_fd(buf, flags);

err:
        while (pgbuf > 0)
                put_page(ubuf->pages[--pgbuf]);
        if (memfd)
                fput(memfd);
        kfree(ubuf->pages);
        kfree(ubuf);
        return ret;
}
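
/*
 * UDMABUF_CREATE: wrap a single (memfd, offset, size) triple in a one-entry
 * list and hand it to udmabuf_create().
 *
 * Illustrative userspace sketch (not part of this file):
 *
 *      int devfd = open("/dev/udmabuf", O_RDWR);
 *      int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *      ftruncate(memfd, size);
 *      fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *      struct udmabuf_create create = {
 *              .memfd  = memfd,
 *              .offset = 0,
 *              .size   = size,
 *      };
 *      int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 */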
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
        struct udmabuf_create create;
        struct udmabuf_create_list head;
        struct udmabuf_create_item list;

        if (copy_from_user(&create, (void __user *)arg,
                           sizeof(create)))
                return -EFAULT;

        head.flags = create.flags;
        head.count = 1;
        list.memfd = create.memfd;
        list.offset = create.offset;
        list.size = create.size;

        return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
        struct udmabuf_create_list head;
        struct udmabuf_create_item *list;
        int ret = -EINVAL;
        u32 lsize;

        if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
                return -EFAULT;
        if (head.count > list_limit)
                return -EINVAL;
        lsize = sizeof(struct udmabuf_create_item) * head.count;
        list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
        if (IS_ERR(list))
                return PTR_ERR(list);

        ret = udmabuf_create(filp->private_data, &head, list);
        kfree(list);
        return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
                          unsigned long arg)
{
        long ret;

        switch (ioctl) {
        case UDMABUF_CREATE:
                ret = udmabuf_ioctl_create(filp, arg);
                break;
        case UDMABUF_CREATE_LIST:
                ret = udmabuf_ioctl_create_list(filp, arg);
                break;
        default:
                ret = -ENOTTY;
                break;
        }
        return ret;
}

static const struct file_operations udmabuf_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "udmabuf",
        .fops = &udmabuf_fops,
};
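
/*
 * Module init: register /dev/udmabuf and allow the misc device to be used
 * as a 64-bit DMA mapping device for the exported buffers.
 */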
static int __init udmabuf_dev_init(void)
{
        int ret;

        ret = misc_register(&udmabuf_misc);
        if (ret < 0) {
                pr_err("Could not initialize udmabuf device\n");
                return ret;
        }

        ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
                                           DMA_BIT_MASK(64));
        if (ret < 0) {
                pr_err("Could not setup DMA mask for udmabuf device\n");
                misc_deregister(&udmabuf_misc);
                return ret;
        }

        return 0;
}

static void __exit udmabuf_dev_exit(void)
{
        misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");