msm_dma_iommu_mapping.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2015-2016, 2018, 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _LINUX_MSM_DMA_IOMMU_MAPPING_H
#define _LINUX_MSM_DMA_IOMMU_MAPPING_H

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>

#if IS_ENABLED(CONFIG_QCOM_LAZY_MAPPING)
/*
 * This function does not take a reference to the dma_buf. Clients are
 * expected to hold a reference to the dma_buf until they are done with
 * mapping and unmapping.
 */
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_buf *dma_buf,
			 unsigned long attrs);
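
/*
 * Illustrative sketch only (not part of this header): one way a client
 * might pair msm_dma_map_sg_attrs() with msm_dma_unmap_sg_attrs() while
 * holding its own dma_buf reference. It assumes the client already has a
 * dma-buf fd and an sg_table describing the buffer; fd, sgt, dev and
 * mapped are hypothetical names.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);	// client's own reference
 *	int mapped;
 *
 *	mapped = msm_dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
 *				      DMA_BIDIRECTIONAL, dmabuf, 0);
 *	if (mapped > 0) {
 *		// ... device uses the mapping ...
 *		msm_dma_unmap_sg_attrs(dev, sgt->sgl, mapped,
 *				       DMA_BIDIRECTIONAL, dmabuf, 0);
 *	}
 *	dma_buf_put(dmabuf);	// drop the reference only after unmapping
 */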

/*
 * This function takes an extra reference to the dma_buf. As a result,
 * calling msm_dma_unmap_sg() will not remove the buffer's IOMMU mapping,
 * so subsequent lazy map calls simply reuse the existing IOMMU mapping.
 * The IOMMU unmapping of the buffer occurs when the ION buffer is
 * destroyed. Lazy mapping can therefore provide a performance benefit,
 * because subsequent mappings are faster.
 *
 * The limitation of this API is that all subsequent IOMMU mappings must be
 * the same as the original mapping, i.e. they must map the same part of the
 * buffer with the same DMA data direction. There also cannot be multiple
 * mappings of different parts of the buffer.
 *
 * An illustrative usage sketch follows msm_dma_map_sg() below.
 */
static inline int msm_dma_map_sg_lazy(struct device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction dir,
				      struct dma_buf *dma_buf)
{
	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, 0);
}

static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				 int nents, enum dma_data_direction dir,
				 struct dma_buf *dma_buf)
{
	unsigned long attrs;

	attrs = DMA_ATTR_NO_DELAYED_UNMAP;
	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, attrs);
}
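
/*
 * Illustrative sketch only: the difference between the two wrappers above,
 * under the same assumptions as the sketch for msm_dma_map_sg_attrs()
 * (dmabuf, sgt and dev are hypothetical, already-initialised client objects).
 *
 *	// Lazy: the IOMMU mapping survives the matching unmap and is reused
 *	// by later identical mappings; it is torn down when the buffer is freed.
 *	msm_dma_map_sg_lazy(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE, dmabuf);
 *
 *	// Non-lazy: DMA_ATTR_NO_DELAYED_UNMAP is passed underneath, so the
 *	// IOMMU mapping is removed when the matching unmap is done.
 *	msm_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE, dmabuf);
 */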
void msm_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction dir,
			    struct dma_buf *dma_buf, unsigned long attrs);

static inline int msm_dma_map_sgtable(struct device *dev, struct sg_table *sgt,
				      enum dma_data_direction dir,
				      struct dma_buf *dma_buf, unsigned long attrs)
{
	int nents;

	nents = msm_dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, dma_buf, attrs);
	if (nents < 0)
		return nents;
	else if (unlikely(nents == 0))
		return -EINVAL;

	sgt->nents = nents;
	return 0;
}

static inline void msm_dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
					 enum dma_data_direction dir,
					 struct dma_buf *dma_buf, unsigned long attrs)
{
	msm_dma_unmap_sg_attrs(dev, sgt->sgl, sgt->nents, dir, dma_buf, attrs);
}
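
/*
 * Illustrative sketch only: the sg_table wrappers above follow the
 * dma_map_sgtable() convention of returning 0 or a negative errno and
 * updating sgt->nents (dmabuf, sgt and dev are hypothetical client objects).
 *
 *	int ret = msm_dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, dmabuf, 0);
 *
 *	if (ret)
 *		return ret;
 *	// ... device uses the mapping described by sgt ...
 *	msm_dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, dmabuf, 0);
 */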

int msm_dma_unmap_all_for_dev(struct device *dev);

/*
 * Below is a private function, only to be called by the framework (ION) and
 * not by clients.
 */
void msm_dma_buf_freed(void *buffer);
#else /* CONFIG_QCOM_LAZY_MAPPING */

static inline int msm_dma_map_sg_attrs(struct device *dev,
				       struct scatterlist *sg, int nents,
				       enum dma_data_direction dir,
				       struct dma_buf *dma_buf,
				       unsigned long attrs)
{
	return -EINVAL;
}

static inline void
msm_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
		       int nents, enum dma_data_direction dir,
		       struct dma_buf *dma_buf, unsigned long attrs)
{
}

static inline int msm_dma_map_sg_lazy(struct device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction dir,
				      struct dma_buf *dma_buf)
{
	return -EINVAL;
}

static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				 int nents, enum dma_data_direction dir,
				 struct dma_buf *dma_buf)
{
	return -EINVAL;
}

static inline int msm_dma_map_sgtable(struct device *dev, struct sg_table *sgt,
				      enum dma_data_direction dir,
				      struct dma_buf *dma_buf, unsigned long attrs)
{
	return -EINVAL;
}

static inline void msm_dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
					 enum dma_data_direction dir,
					 struct dma_buf *dma_buf, unsigned long attrs)
{
}

static inline int msm_dma_unmap_all_for_dev(struct device *dev)
{
	return 0;
}

static inline void msm_dma_buf_freed(void *buffer) {}

#endif /* CONFIG_QCOM_LAZY_MAPPING */
#endif /* _LINUX_MSM_DMA_IOMMU_MAPPING_H */