BACKPORT: FROMLIST: dma-heap: Let dma heap use dma_map_attrs to map & unmap iova

For dma-heap users, there is currently no way to bypass cache sync
when mapping or unmapping an iova through a dma heap. Users of
dma_alloc_attrs, by contrast, can already do so by passing
DMA_ATTR_SKIP_CPU_SYNC.
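
For example (illustrative sketch only; dev, size and dma_handle are
placeholders, not part of this patch):

  #include <linux/dma-mapping.h>

  /* existing path: skip CPU cache maintenance for this allocation */
  buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
                        DMA_ATTR_SKIP_CPU_SYNC);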

To keep the two paths aligned, make the dma-heap side honor
dma_buf_attachment.dma_map_attrs when mapping and unmapping an iova as
well; an importer-side sketch follows.
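
Roughly (illustrative sketch, not part of this patch; the helper name
map_skip_sync is made up):

  #include <linux/dma-buf.h>
  #include <linux/dma-mapping.h>
  #include <linux/err.h>

  /* importer side: map a dma-buf while skipping CPU cache sync */
  static struct sg_table *map_skip_sync(struct dma_buf *dmabuf,
                                        struct device *dev)
  {
          struct dma_buf_attachment *attach;

          attach = dma_buf_attach(dmabuf, dev);
          if (IS_ERR(attach))
                  return ERR_CAST(attach);

          /* honored by the heap's (un)map_dma_buf after this patch */
          attach->dma_map_attrs = DMA_ATTR_SKIP_CPU_SYNC;

          return dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
  }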

This patch differs slightly from the upstream Linux patch because ACK
has a cached heap that upstream Linux does not.

Bug: 189986159
Link: https://lore.kernel.org/patchwork/patch/1455032/
Change-Id: I324712644688c29e55c9197efcde9283bbbd813b
Signed-off-by: Guangming Cao <Guangming.Cao@mediatek.com>

--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -99,9 +99,10 @@ static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
 {
         struct dma_heap_attachment *a = attachment->priv;
         struct sg_table *table = &a->table;
+        int attrs = attachment->dma_map_attrs;
         int ret;
 
-        ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+        ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
         if (ret)
                 return ERR_PTR(-ENOMEM);
         a->mapped = true;
@@ -113,9 +114,10 @@ static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                    enum dma_data_direction direction)
 {
         struct dma_heap_attachment *a = attachment->priv;
+        int attrs = attachment->dma_map_attrs;
 
         a->mapped = false;
-        dma_unmap_sgtable(attachment->dev, table, direction, 0);
+        dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
 static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,

--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -140,11 +140,11 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
 {
         struct dma_heap_attachment *a = attachment->priv;
         struct sg_table *table = a->table;
-        int attr = 0;
+        int attr = attachment->dma_map_attrs;
         int ret;
 
         if (a->uncached)
-                attr = DMA_ATTR_SKIP_CPU_SYNC;
+                attr |= DMA_ATTR_SKIP_CPU_SYNC;
 
         ret = dma_map_sgtable(attachment->dev, table, direction, attr);
         if (ret)
@@ -159,10 +159,10 @@ static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                       enum dma_data_direction direction)
 {
         struct dma_heap_attachment *a = attachment->priv;
-        int attr = 0;
+        int attr = attachment->dma_map_attrs;
 
         if (a->uncached)
-                attr = DMA_ATTR_SKIP_CPU_SYNC;
+                attr |= DMA_ATTR_SKIP_CPU_SYNC;
         a->mapped = false;
         dma_unmap_sgtable(attachment->dev, table, direction, attr);
 }