dma-buf: start caching of sg_table objects v2

To allow a smooth transition from pinning buffer objects to dynamic
invalidation, we first start caching the sg_table for an attachment.

v2: keep closer to the DRM implementation

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.kernel.org/patch/10943053/
Author: Christian König
Date:   2018-07-03 16:42:26 +02:00
Commit: f13e143e74 (parent: 0c7b178ad7)
2 changed files with 38 additions and 2 deletions
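For context, a usage sketch from an importer's point of view: once the exporter opts in, repeated dma_buf_map_attachment() calls on the same attachment reuse one cached sg_table instead of asking the exporter to build a new mapping each time, and unmapping the cached table is deferred until the attachment is detached. The buffer and device variables below are placeholders, and the behaviour assumes the dma-buf.c side of this patch works roughly as sketched at the end of this page.

	struct dma_buf_attachment *attach;
	struct sg_table *sgt, *sgt2;

	/* buf is a shared struct dma_buf *, dev the importing device */
	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, attach);
		return PTR_ERR(sgt);
	}

	/* With cache_sgt_mapping set by the exporter, this second call
	 * can return the cached attach->sgt without invoking ->map_dma_buf. */
	sgt2 = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

	/* Unmapping the cached table is deferred; the mapping is actually
	 * torn down when the attachment is detached. */
	dma_buf_unmap_attachment(attach, sgt2, DMA_BIDIRECTIONAL);
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, attach);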


@@ -44,6 +44,15 @@ struct dma_buf_attachment;
* @vunmap: [optional] unmaps a vmap from the buffer
*/
struct dma_buf_ops {
	/**
	 * @cache_sgt_mapping:
	 *
	 * If true the framework will cache the first mapping made for each
	 * attachment. This avoids creating mappings for attachments multiple
	 * times.
	 */
	bool cache_sgt_mapping;

	/**
	 * @attach:
	 *
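An exporter would opt in to the new behaviour simply by setting the flag in its dma_buf_ops. A minimal, hypothetical example (the my_* callbacks are placeholders, not part of this patch):

	static const struct dma_buf_ops my_exporter_dmabuf_ops = {
		.cache_sgt_mapping = true,	/* let the core cache the first sg_table */
		.map_dma_buf	   = my_map_dma_buf,
		.unmap_dma_buf	   = my_unmap_dma_buf,
		.release	   = my_release,
		.mmap		   = my_mmap,
		/* remaining optional callbacks omitted for brevity */
	};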
@@ -323,6 +332,8 @@ struct dma_buf {
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment.
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
@@ -338,6 +349,8 @@ struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	void *priv;
};
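The dma-buf.c half of the patch is not reproduced above. Roughly, the core-side behaviour these new fields enable can be sketched as follows; this is a simplified approximation, not the verbatim upstream code:

	struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
						enum dma_data_direction direction)
	{
		struct sg_table *sg_table;

		if (WARN_ON(!attach || !attach->dmabuf))
			return ERR_PTR(-EINVAL);

		/* Reuse the cached mapping if the direction is compatible. */
		if (attach->sgt) {
			if (attach->dir != direction &&
			    attach->dir != DMA_BIDIRECTIONAL)
				return ERR_PTR(-EBUSY);
			return attach->sgt;
		}

		sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
		if (!sg_table)
			sg_table = ERR_PTR(-ENOMEM);

		/* Remember the first successful mapping if the exporter opted in. */
		if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
			attach->sgt = sg_table;
			attach->dir = direction;
		}

		return sg_table;
	}

	void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				      struct sg_table *sg_table,
				      enum dma_data_direction direction)
	{
		if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
			return;

		/* The cached mapping stays alive until the attachment goes away. */
		if (attach->sgt == sg_table)
			return;

		attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
	}

In such a scheme, dma_buf_detach() would be responsible for unmapping and dropping attach->sgt when the attachment is finally torn down.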