/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
	bool				non_coherent_mem;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

/*
 * This function may fail if:
 *
 * - dma_buf_vmap() fails
 *   E.g. due to lack of virtual mapping address space, or due to
 *   dmabuf->ops misconfiguration.
 *
 * - dma_vmap_noncontiguous() fails
 *   For instance, when requested buffer size is larger than totalram_pages().
 *   Relevant for buffers that use non-coherent memory.
 *
 * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
 *   Relevant for buffers that use coherent memory.
 */
static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;

	if (buf->db_attach) {
		struct iosys_map map;

		if (!dma_buf_vmap(buf->db_attach->dmabuf, &map))
			buf->vaddr = map.vaddr;

		return buf->vaddr;
	}

	if (buf->non_coherent_mem)
		buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
						    buf->dma_sgt);
	return buf->vaddr;
}
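
/*
 * A minimal usage sketch (not part of this allocator): drivers reach the
 * vaddr memop above through vb2_plane_vaddr(), so a NULL return has to be
 * treated as one of the failure cases listed in the comment before
 * vb2_dc_vaddr(). The my_consume() helper below is purely hypothetical.
 *
 *	void *va = vb2_plane_vaddr(vb, 0);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	my_consume(va, vb2_get_plane_payload(vb, 0));
 */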

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		flush_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_finish)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		invalidate_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->non_coherent_mem) {
		if (buf->vaddr)
			dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
		dma_free_noncontiguous(buf->dev, buf->size,
				       buf->dma_sgt, buf->dma_dir);
	} else {
		if (buf->sgt_base) {
			sg_free_table(buf->sgt_base);
			kfree(buf->sgt_base);
		}
		dma_free_attrs(buf->dev, buf->size, buf->cookie,
			       buf->dma_addr, buf->attrs);
	}
	put_device(buf->dev);
	kfree(buf);
}

static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->cookie = dma_alloc_attrs(buf->dev,
				      buf->size,
				      &buf->dma_addr,
				      GFP_KERNEL | q->gfp_flags,
				      buf->attrs);
	if (!buf->cookie)
		return -ENOMEM;

	if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return 0;

	buf->vaddr = buf->cookie;
	return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
					       buf->size,
					       buf->dma_dir,
					       GFP_KERNEL | q->gfp_flags,
					       buf->attrs);
	if (!buf->dma_sgt)
		return -ENOMEM;

	buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

	/*
	 * For non-coherent buffers the kernel mapping is created on demand
	 * in vb2_dc_vaddr().
	 */
	return 0;
}

static void *vb2_dc_alloc(struct vb2_buffer *vb,
			  struct device *dev,
			  unsigned long size)
{
	struct vb2_dc_buf *buf;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = vb->vb2_queue->dma_attrs;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;
	buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

	buf->size = size;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	if (buf->non_coherent_mem)
		ret = vb2_dc_alloc_non_coherent(buf);
	else
		ret = vb2_dc_alloc_coherent(buf);

	if (ret) {
		dev_err(dev, "dma alloc of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	if (buf->non_coherent_mem)
		ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
					     buf->dma_sgt);
	else
		ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
				     buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
				    struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
				     struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * Map to the client with the new direction; no cache sync is
	 * required, see the comment in vb2_dc_dmabuf_ops_detach().
	 */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct vb2_dc_buf *buf;
	void *vaddr;

	buf = dbuf->priv;
	vaddr = vb2_dc_vaddr(buf->vb, buf);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				  struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	if (buf->non_coherent_mem)
		return buf->dma_sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
					 void *buf_priv,
					 unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->non_coherent_mem = 1;

out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				  struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->vb = vb;

	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
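
/*
 * A minimal sketch (not part of this file) of how a driver selects this
 * allocator: the vb2_queue fields shown are the standard ones, while
 * my_buffer and my_vb2_ops are hypothetical driver types.
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->dev = dev;
 *	q->ops = &my_vb2_ops;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	ret = vb2_queue_init(q);
 */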

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called by drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). It should be done before initializing the
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
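
/*
 * A minimal usage sketch (not part of this file), assuming a platform
 * driver whose video device sits behind an IOMMU; "pdev" is a hypothetical
 * platform device. As the kernel-doc above notes, the call must precede
 * vb2_queue_init() on the queue that uses this device.
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */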

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);