Merge "disp: msm: remove use of DMA attributes LLC_NWA and Upstream Hint"

Authored by: qctecmdr on 2021-08-19 18:45:48 -07:00
Committed by: Gerrit - the friendly Code Review server

3 changed files with 0 additions and 14 deletions

@@ -1238,15 +1238,6 @@ int msm_gem_delayed_import(struct drm_gem_object *obj)
         if (msm_obj->flags & MSM_BO_SKIPSYNC)
                 attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 
-        /*
-         * All SMMU mapping are generated with cache hint.
-         * SSPP cache hint will control the LLCC access.
-         */
-        if (msm_obj->flags & MSM_BO_KEEPATTRS)
-                attach->dma_map_attrs |=
-                                (DMA_ATTR_IOMMU_USE_UPSTREAM_HINT |
-                                 DMA_ATTR_IOMMU_USE_LLC_NWA);
-
         /*
          * dma_buf_map_attachment will call dma_map_sg for ion buffer
          * mapping, and iova will get mapped when the function returns.
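
After this hunk, the only attribute the delayed-import path still sets on the attachment is DMA_ATTR_SKIP_CPU_SYNC; the MSM_BO_KEEPATTRS branch that ORed in the upstream-hint and LLC_NWA attributes is gone. Below is a minimal sketch of the remaining flow, assuming the downstream dma_map_attrs field of struct dma_buf_attachment that the hunk uses; the function and parameter names (example_delayed_import, dev, dmabuf, skip_cpu_sync) are placeholders, not driver symbols.

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/*
 * Sketch only: the import flow that remains after this change.
 * dma_map_attrs is the downstream dma_buf_attachment member shown in
 * the hunk above; dev, dmabuf and skip_cpu_sync are placeholders.
 */
static struct sg_table *example_delayed_import(struct device *dev,
                                               struct dma_buf *dmabuf,
                                               bool skip_cpu_sync)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        /* Only the skip-sync attribute is still applied here. */
        if (skip_cpu_sync)
                attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

        /*
         * dma_buf_map_attachment() runs dma_map_sg() on the exporter's
         * pages, so the IOVA is available once it returns.
         */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                dma_buf_detach(dmabuf, attach);

        return sgt;
}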

@@ -163,8 +163,6 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
                 return ERR_CAST(attach);
         }
 
-        attach->dma_map_attrs |= DMA_ATTR_IOMMU_USE_LLC_NWA;
-
         /*
          * For cached buffers where CPU access is required, dma_map_attachment
          * must be called now to allow user-space to perform cpu sync begin/end
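
The retained comment is why the mapping happens eagerly for cached buffers: user-space brackets its CPU access with cpu-sync begin/end (the DMA_BUF_IOCTL_SYNC ioctl). Purely as an illustration, the in-kernel equivalent of that bracketing looks like the following; dmabuf is a placeholder argument.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

/* Illustration only: bracket CPU access to a cached, imported dma-buf. */
static int example_cpu_access(struct dma_buf *dmabuf)
{
        int ret;

        ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
        if (ret)
                return ret;

        /* ... CPU reads/writes of the buffer contents go here ... */

        return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
}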

@@ -242,9 +242,6 @@ static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
                 return -ENOMEM;
         }
 
-        if (flags & MSM_BO_KEEPATTRS)
-                attrs |= DMA_ATTR_IOMMU_USE_LLC_NWA;
-
         /*
          * For import buffer type, dma_map_sg_attrs is called during
          * dma_buf_map_attachment and is not required to call again
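
With the LLC_NWA attribute dropped here, a skip-CPU-sync attribute is the only one this mapping path could still carry. A sketch of the remaining non-import mapping step follows, using placeholder names (example_map_sgt, dev, sgt, skip_sync); as the retained comment notes, imported tables are already mapped by dma_buf_map_attachment and must not be mapped again.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch only: dev, sgt and skip_sync are placeholders, not driver symbols. */
static int example_map_sgt(struct device *dev, struct sg_table *sgt,
                           bool skip_sync)
{
        unsigned long attrs = 0;

        if (skip_sync)
                attrs |= DMA_ATTR_SKIP_CPU_SYNC;

        /* dma_map_sg_attrs() returns 0 if nothing could be mapped. */
        if (!dma_map_sg_attrs(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL,
                              attrs))
                return -ENOMEM;

        return 0;
}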