Merge tag 'dma-mapping-5.8-2' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping helpers from Christoph Hellwig:
 "These were in a separate stable branch so that various media and drm
  trees could pull them in for bug fixes, but looking at linux-next
  that hasn't actually happened yet. Still sending the APIs to you in
  the hope that these bug fixes get picked up for 5.8 in one way or
  another.

  Summary:

   - add DMA mapping helpers for struct sg_table (Marek Szyprowski)"

* tag 'dma-mapping-5.8-2' of git://git.infradead.org/users/hch/dma-mapping:
  iommu: add generic helper for mapping sgtable objects
  scatterlist: add generic wrappers for iterating over sgtable objects
  dma-mapping: add generic helpers for mapping sgtable objects
@@ -609,6 +609,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership of the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer.
 *
 * Returns 0 on success or -EINVAL if the buffer could not be mapped.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * returns, the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so the CPU can safely access the buffer.
 * Before doing any further DMA operations, one has to transfer the
 * ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
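
For context, a driver adopting these helpers pairs dma_map_sgtable() with
dma_unmap_sgtable() and brackets CPU accesses with the two sync calls, instead
of open-coding dma_map_sg_attrs() against sgt->sgl/sgt->orig_nents. The
following is a minimal sketch, not part of this commit; example_receive, the
DMA_FROM_DEVICE direction, and the pre-allocated sg_table are illustrative
assumptions:

	#include <linux/dma-mapping.h>

	/*
	 * Hypothetical driver helper (illustration only): receive data from
	 * a device into a buffer described by an already-allocated sg_table.
	 */
	static int example_receive(struct device *dev, struct sg_table *sgt)
	{
		int ret;

		/* Transfer ownership of the buffer to the DMA domain. */
		ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
		if (ret)
			return ret;	/* -EINVAL: buffer could not be mapped */

		/* ... start the device transfer using sgt->sgl / sgt->nents ... */

		/* Move ownership back to the CPU domain before reading the data. */
		dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
		/* ... CPU examines the received data ... */

		/* Return ownership to the DMA domain for the next transfer ... */
		dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
		/* ... further transfers and syncs as needed ... */

		/* Finally, unmap the buffer; ownership returns to the CPU. */
		dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
		return 0;
	}

Note that the wrappers consistently use sgt->orig_nents (the number of CPU-side
entries) for mapping and syncing, while dma_map_sgtable() stores the number of
DMA-mapped entries in sgt->nents, which is what the device programming code
should iterate over.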