Browse Source

disp: msm: remove use of DMA attributes LLC_NWA and Upstream Hint

Remove DMA_ATTR_IOMMU_USE_LLC_NWA and DMA_ATTR_IOMMU_USE_UPSTREAM_HINT
attributes as they are no longer needed since io-coherency is enabled.
Passing these attributes is a no-op, since the buffer is io-coherent and
will be mapped with a write-allocate policy, contradicting the intention
of those attributes.

Change-Id: I882f148d770c795eb005c5391171a6280c083d37
Signed-off-by: Samantha Tran <[email protected]>
Samantha Tran 3 years ago
parent
commit
2c2224bdf3
3 changed files with 0 additions and 14 deletions
  1. 0 9
      msm/msm_gem.c
  2. 0 2
      msm/msm_gem_prime.c
  3. 0 3
      msm/msm_smmu.c

+ 0 - 9
msm/msm_gem.c

@@ -1238,15 +1238,6 @@ int msm_gem_delayed_import(struct drm_gem_object *obj)
 	if (msm_obj->flags & MSM_BO_SKIPSYNC)
 		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 
-	/*
-	 * All SMMU mapping are generated with cache hint.
-	 * SSPP cache hint will control the LLCC access.
-	 */
-	if (msm_obj->flags & MSM_BO_KEEPATTRS)
-		attach->dma_map_attrs |=
-				(DMA_ATTR_IOMMU_USE_UPSTREAM_HINT |
-				DMA_ATTR_IOMMU_USE_LLC_NWA);
-
 	/*
 	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
 	 * mapping, and iova will get mapped when the function returns.

+ 0 - 2
msm/msm_gem_prime.c

@@ -172,8 +172,6 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
 		return ERR_CAST(attach);
 	}
 
-	attach->dma_map_attrs |= DMA_ATTR_IOMMU_USE_LLC_NWA;
-
 	/*
 	 * For cached buffers where CPU access is required, dma_map_attachment
 	 * must be called now to allow user-space to perform cpu sync begin/end

+ 0 - 3
msm/msm_smmu.c

@@ -242,9 +242,6 @@ static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
 		return -ENOMEM;
 	}
 
-	if (flags & MSM_BO_KEEPATTRS)
-		attrs |= DMA_ATTR_IOMMU_USE_LLC_NWA;
-
 	/*
 	 * For import buffer type, dma_map_sg_attrs is called during
 	 * dma_buf_map_attachment and is not required to call again