RDMA/umem: Add rdma_umem_for_each_dma_block()
This helper does the same as rdma_for_each_block(), except it works on a umem. This simplifies most of the call sites.

Link: https://lore.kernel.org/r/4-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
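For context on the call-site simplification the message describes: code that previously iterated the umem with the generic rdma_for_each_block() had to reach into the umem for its scatterlist and nmap, while the new wrapper takes the umem directly. A minimal before/after sketch (the loop body is illustrative, not from this patch):

    /* Before: call sites pass the umem's SGL and nmap explicitly. */
    rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pgsz) {
            /* ... consume rdma_block_iter_dma_address(&biter) ... */
    }

    /* After: the umem variant supplies the scatterlist details itself. */
    rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
            /* ... same body ... */
    }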
@@ -40,6 +40,26 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
                PAGE_SHIFT;
 }
 
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+                                                struct ib_umem *umem,
+                                                unsigned long pgsz)
+{
+        __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
+        for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
+             __rdma_block_iter_next(biter);)
+
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
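As a usage illustration, the sketch below shows how a driver might combine the new macro with ib_umem_find_best_pgsz() to collect one DMA address per block, e.g. to program a device page table. The fill_pas() helper, the pas[] destination array, and the SZ_4K | SZ_2M capability mask are hypothetical; only the RDMA core helpers it calls are real.

    /* Hypothetical driver helper: one pgsz-aligned DMA address per block. */
    static int fill_pas(struct ib_umem *umem, u64 *pas, u64 virt_addr)
    {
            struct ib_block_iter biter;
            unsigned long pgsz;
            int i = 0;

            /* Pick the largest page size the device supports for this umem;
             * SZ_4K | SZ_2M is an illustrative capability bitmap. */
            pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt_addr);
            if (!pgsz)
                    return -EINVAL;

            /* Blocks are aligned to pgsz and cover
             * ALIGN_DOWN(address, pgsz) .. ALIGN(address + length, pgsz). */
            rdma_umem_for_each_dma_block(umem, &biter, pgsz)
                    pas[i++] = rdma_block_iter_dma_address(&biter);

            return i;
    }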