/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <[email protected]>
 *
 * Many thanks to the linaro-mm-sig list, and especially
 * Arnd Bergmann <[email protected]>, Rob Clark <[email protected]> and
 * Daniel Vetter <[email protected]> for their support in the creation and
 * refining of this idea.
 */

#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
#include <linux/android_kabi.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @cache_sgt_mapping:
	 *
	 * If true the framework will cache the first mapping made for each
	 * attachment. This avoids creating mappings for attachments multiple
	 * times.
	 */
	bool cache_sgt_mapping;

	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @pin:
	 *
	 * This is called by dma_buf_pin() and lets the exporter know that the
	 * DMA-buf can't be moved any more. Ideally, the exporter should
	 * pin the buffer so that it is generally accessible by all
	 * devices.
	 *
	 * This is called with the &dmabuf.resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This is called automatically for non-dynamic importers from
	 * dma_buf_attach().
	 *
	 * Note that similar to non-dynamic exporters in their @map_dma_buf
	 * callback the driver must guarantee that the memory is available for
	 * use and cleared of any old data by the time this function returns.
	 * Drivers which pipeline their buffer moves internally must wait for
	 * all moves and clears to complete.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure.
	 */
	int (*pin)(struct dma_buf_attachment *attach);

	/**
	 * @unpin:
	 *
	 * This is called by dma_buf_unpin() and lets the exporter know that the
	 * DMA-buf can be moved again.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct dma_buf_attachment *attach);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * This is always called with the dmabuf->resv object locked when
	 * the dynamic_mapping flag is true.
	 *
	 * Note that for non-dynamic exporters the driver must guarantee that
	 * the memory is available for use and cleared of any old data by
	 * the time this function returns. Drivers which pipeline their buffer
	 * moves internally must wait for all moves and clears to complete.
	 * Dynamic exporters do not need to follow this rule: for non-dynamic
	 * importers the buffer is already pinned through @pin, which has the
	 * same requirements. Dynamic importers, on the other hand, are
	 * required to obey the dma_resv fences.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment. The addresses and lengths in
	 * the scatter list are PAGE_SIZE aligned.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 *
	 * Note that exporters should not try to cache the scatter list, or
	 * return the same one for multiple calls. Caching is done either by the
	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
	 * of the scatter list is transferred to the caller, and returned by
	 * @unmap_dma_buf.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);

	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * For static dma_buf handling this might also unpin the backing
	 * storage if this is the last mapping of the DMA buffer.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
	 * if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually coherent for cpu
	 * access. The exporter also needs to ensure that cpu access is coherent
	 * for the access direction. The direction can be used by the exporter
	 * to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
	 * command for userspace mappings established through @mmap, and also
	 * for kernel mappings established with @vmap.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @begin_cpu_access_partial:
	 *
	 * This is called from dma_buf_begin_cpu_access_partial() and allows the
	 * exporter to ensure that the memory specified in the range is
	 * available for cpu access - the exporter might need to allocate or
	 * swap-in and pin the backing storage.
	 * The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de-facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
					enum dma_data_direction,
					unsigned int offset, unsigned int len);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the CPU. The exporter can use this to flush caches and
	 * undo anything else done in @begin_cpu_access.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access_partial:
	 *
	 * This is called from dma_buf_end_cpu_access_partial() when the
	 * importer is done accessing the CPU. The exporter can use this to
	 * limit cache flushing to only the range specified and to unpin any
	 * resources pinned in @begin_cpu_access_partial.
	 * The result of any dma_buf kmap calls after end_cpu_access_partial is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
				      enum dma_data_direction,
				      unsigned int offset, unsigned int len);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent, userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to setup their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
	void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);

	/**
	 * @get_flags:
	 *
	 * This is called by dma_buf_get_flags() and is used to get the
	 * buffer's flags.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. On success, @flags
	 * will be populated with the buffer's flags.
	 */
	int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

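/*
 * Example (illustrative sketch, not part of the upstream header): a minimal
 * exporter backed by an array of pages could wire up the mandatory callbacks
 * roughly as below. The names struct my_buffer, my_map_dma_buf,
 * my_unmap_dma_buf, my_release and my_dma_buf_ops are hypothetical, and error
 * handling is reduced to the essentials.
 *
 *	struct my_buffer {
 *		struct page **pages;
 *		unsigned int nr_pages;
 *	};
 *
 *	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
 *					       enum dma_data_direction dir)
 *	{
 *		struct my_buffer *buf = attach->dmabuf->priv;
 *		struct sg_table *sgt;
 *		int ret;
 *
 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *		if (!sgt)
 *			return ERR_PTR(-ENOMEM);
 *
 *		// Build a scatterlist covering the buffer and map it for the
 *		// attached device.
 *		ret = sg_alloc_table_from_pages(sgt, buf->pages, buf->nr_pages,
 *						0,
 *						(unsigned long)buf->nr_pages << PAGE_SHIFT,
 *						GFP_KERNEL);
 *		if (!ret)
 *			ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
 *		if (ret) {
 *			sg_free_table(sgt);
 *			kfree(sgt);
 *			return ERR_PTR(ret);
 *		}
 *		return sgt;
 *	}
 *
 *	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
 *				     struct sg_table *sgt,
 *				     enum dma_data_direction dir)
 *	{
 *		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 *		sg_free_table(sgt);
 *		kfree(sgt);
 *	}
 *
 *	static void my_release(struct dma_buf *dmabuf)
 *	{
 *		// Free the exporter-private backing storage here.
 *	}
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */
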
/**
 * struct dma_buf - shared buffer object
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	/**
	 * @size:
	 *
	 * Size of the buffer; invariant over the lifetime of the buffer.
	 */
	size_t size;

	/**
	 * @file:
	 *
	 * File pointer used for sharing buffers across processes, and for
	 * refcounting. See dma_buf_get() and dma_buf_put().
	 */
	struct file *file;

	/**
	 * @attachments:
	 *
	 * List of dma_buf_attachment that denotes all devices attached,
	 * protected by &dma_resv lock @resv.
	 */
	struct list_head attachments;

	/** @ops: dma_buf_ops associated with this buffer object. */
	const struct dma_buf_ops *ops;

	/**
	 * @lock:
	 *
	 * Used internally to serialize list manipulation, attach/detach and
	 * vmap/unmap. Note that in many cases this is superseded by
	 * dma_resv_lock() on @resv.
	 */
	struct mutex lock;

	/**
	 * @vmapping_counter:
	 *
	 * Used internally to refcount the vmaps returned by dma_buf_vmap().
	 * Protected by @lock.
	 */
	unsigned vmapping_counter;

	/**
	 * @vmap_ptr:
	 * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
	 */
	struct iosys_map vmap_ptr;

	/**
	 * @exp_name:
	 *
	 * Name of the exporter; useful for debugging. See the
	 * DMA_BUF_SET_NAME IOCTL.
	 */
	const char *exp_name;

	/**
	 * @name:
	 *
	 * Userspace-provided name; useful for accounting and debugging,
	 * protected by dma_resv_lock() on @resv and @name_lock for read access.
	 */
	const char *name;

	/** @name_lock: Spinlock to protect read access to @name. */
	spinlock_t name_lock;

	/**
	 * @owner:
	 *
	 * Pointer to exporter module; used for refcounting when exporter is a
	 * kernel module.
	 */
	struct module *owner;

	/** @list_node: node for dma_buf accounting and debugging. */
	struct list_head list_node;

	/** @priv: exporter specific private data for this buffer object. */
	void *priv;

	/**
	 * @resv:
	 *
	 * Reservation object linked to this dma-buf.
	 *
	 * IMPLICIT SYNCHRONIZATION RULES:
	 *
	 * Drivers which support implicit synchronization of buffer access as
	 * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
	 * below rules.
	 *
	 * - Drivers must add a read fence through dma_resv_add_fence() with the
	 *   DMA_RESV_USAGE_READ flag for anything the userspace API considers a
	 *   read access. This highly depends upon the API and window system.
	 *
	 * - Similarly drivers must add a write fence through
	 *   dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
	 *   anything the userspace API considers write access.
	 *
	 * - Drivers may just always add a write fence, since that only
	 *   causes unnecessary synchronization, but no correctness issues.
	 *
	 * - Some drivers only expose a synchronous userspace API with no
	 *   pipelining across drivers. These do not set any fences for their
	 *   access. An example here is v4l.
	 *
	 * - Drivers should use dma_resv_usage_rw() when retrieving fences as
	 *   dependency for implicit synchronization.
	 *
	 * DYNAMIC IMPORTER RULES:
	 *
	 * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
	 * additional constraints on how they set up fences:
	 *
	 * - Dynamic importers must obey the write fences and wait for them to
	 *   signal before allowing access to the buffer's underlying storage
	 *   through the device.
	 *
	 * - Dynamic importers should set fences for any access that they can't
	 *   disable immediately from their &dma_buf_attach_ops.move_notify
	 *   callback.
	 *
	 * IMPORTANT:
	 *
	 * All drivers and memory management related functions must obey the
	 * struct dma_resv rules, specifically the rules for updating and
	 * obeying fences. See enum dma_resv_usage for further descriptions.
	 */
	struct dma_resv *resv;

	/** @poll: for userspace poll support */
	wait_queue_head_t poll;

	/** @cb_in: for userspace poll support */
	/** @cb_out: for userspace poll support */
	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;
		__poll_t active;
	} cb_in, cb_out;
#ifdef CONFIG_DMABUF_SYSFS_STATS
	/**
	 * @sysfs_entry:
	 *
	 * For exposing information about this buffer in sysfs. See also
	 * `DMA-BUF statistics`_ for the uapi this enables.
	 */
	struct dma_buf_sysfs_entry {
		struct kobject kobj;
		struct dma_buf *dmabuf;
	} *sysfs_entry;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
	/**
	 * @allow_peer2peer:
	 *
	 * If this is set to true the importer must be able to handle peer
	 * resources without struct pages.
	 */
	bool allow_peer2peer;

	/**
	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exist.
	 *
	 * This callback is called with the lock of the reservation object
	 * associated with the dma_buf held and the mapping function must be
	 * called with this lock held as well. This makes sure that no mapping
	 * is created concurrently with an ongoing move operation.
	 *
	 * Mappings stay valid and are not directly affected by this callback.
	 * But the DMA-buf can now be in a different physical location, so all
	 * mappings should be destroyed and re-created as soon as possible.
	 *
	 * New mappings can be created after this callback returns, and will
	 * point to the new location of the DMA-buf.
	 */
	void (*move_notify)(struct dma_buf_attachment *attach);

	ANDROID_KABI_RESERVE(1);
};

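/*
 * Example (illustrative sketch, not part of the upstream header): a dynamic
 * importer supplies &dma_buf_attach_ops.move_notify and attaches with
 * dma_buf_dynamic_attach(). The names struct my_importer, my_move_notify and
 * my_attach_ops are hypothetical.
 *
 *	struct my_importer {
 *		bool mapping_stale;
 *	};
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		// Existing mappings remain valid for now, but should be
 *		// destroyed and re-created before the next device access.
 *		imp->mapping_stale = true;
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify	 = my_move_notify,
 *	};
 *
 *	// attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
 */
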
/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @sgt: cached mapping.
 * @dir: direction of cached mapping.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment; if provided,
 * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
 * @importer_priv: importer specific attachment data.
 * @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
 * through dma_buf_map_attachment.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	bool peer2peer;
	const struct dma_buf_attach_ops *importer_ops;
	void *importer_priv;
	void *priv;
	unsigned long dma_map_attrs;
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name: name of the exporter - useful for debugging.
 * @owner: pointer to exporter module - used for refcounting kernel module
 * @ops: Attach allocator-defined dma buf ops to the new buffer
 * @size: Size of the buffer - invariant over the lifetime of the buffer
 * @flags: mode flags for the file
 * @resv: reservation-object, NULL to allocate default one
 * @priv: Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct dma_resv *resv;
	void *priv;
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					    .owner = THIS_MODULE }

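/*
 * Example (illustrative sketch, not part of the upstream header): exporting a
 * buffer and handing a file descriptor to userspace. my_dma_buf_ops, buf and
 * size are hypothetical values supplied by the exporter.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops	= &my_dma_buf_ops;
 *	exp_info.size	= size;
 *	exp_info.flags	= O_RDWR;
 *	exp_info.priv	= buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);	// drop the reference taken by export
 */
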
/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed in case of drivers
 * that need to create additional references to the dmabuf on the kernel
 * side. For example, an exporter that needs to keep a dmabuf ptr so that
 * subsequent exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
	return !!dmabuf->ops->pin;
}

/**
 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
 * mappings
 * @attach: the DMA-buf attachment to check
 *
 * Returns true if a DMA-buf importer wants to call the map/unmap functions
 * with the dma_resv lock held.
 */
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

int is_dma_buf_file(struct file *file);
int dma_buf_get_each(int (*callback)(const struct dma_buf *dmabuf,
		     void *private), void *private);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					   struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attach);
int dma_buf_pin(struct dma_buf_attachment *attach);
void dma_buf_unpin(struct dma_buf_attachment *attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
void dma_buf_move_notify(struct dma_buf *dma_buf);

int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
				     enum dma_data_direction dir,
				     unsigned int offset, unsigned int len);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
				   enum dma_data_direction dir,
				   unsigned int offset, unsigned int len);

struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction);
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
long dma_buf_set_name(struct dma_buf *dmabuf, const char *name);
int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);

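/*
 * Example (illustrative sketch, not part of the upstream header): a simple,
 * non-dynamic importer turning a file descriptor received from userspace into
 * a device-mapped scatterlist. dev and fd are hypothetical; the _unlocked
 * mapping helpers take care of the dma_resv locking.
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach)) {
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(attach);
 *	}
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dmabuf, attach);
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	// ... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
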
#endif /* __DMA_BUF_H__ */