RDMA/verbs: Add a DMA iterator to return aligned contiguous memory blocks

This helper iterates over a DMA-mapped scatterlist (SGL) and returns
contiguous memory blocks aligned to a HW-supported page size.

Suggested-by: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/core/verbs.c

@@ -2710,3 +2710,37 @@ int rdma_init_netdev(struct ib_device *device, u8 port_num,
                                    netdev, params.param);
}
EXPORT_SYMBOL(rdma_init_netdev);

void __rdma_block_iter_start(struct ib_block_iter *biter,
                             struct scatterlist *sglist, unsigned int nents,
                             unsigned long pgsz)
{
        memset(biter, 0, sizeof(struct ib_block_iter));
        biter->__sg = sglist;
        biter->__sg_nents = nents;

        /* Driver provides best block size to use */
        biter->__pg_bit = __fls(pgsz);
}
EXPORT_SYMBOL(__rdma_block_iter_start);

bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
        unsigned int block_offset;

        if (!biter->__sg_nents || !biter->__sg)
                return false;

        biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
        block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
        biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;

        if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
                biter->__sg_advance = 0;
                biter->__sg = sg_next(biter->__sg);
                biter->__sg_nents--;
        }

        return true;
}
EXPORT_SYMBOL(__rdma_block_iter_next);
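
A minimal sketch of how a driver could consume the iterator, assuming the
rdma_for_each_block() and rdma_block_iter_dma_address() helpers that the
include/rdma/ib_verbs.h half of this commit is expected to provide;
fill_page_list(), its pbl argument, and the umem->sg_head.sgl / umem->nmap
accesses are illustrative, not taken from any in-tree driver. As a worked
example, with pg_sz = 4096 (__pg_bit = 12) an SG entry mapped at DMA address
0x12345800 with length 0x2800 produces three iterations whose aligned block
addresses are 0x12345000, 0x12346000 and 0x12347000.

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

/*
 * Illustrative only: copy the HW-page-aligned DMA address of every block
 * in a DMA-mapped umem into a driver page list (pbl).  pg_sz is the best
 * page size the HW supports for this region.
 */
static void fill_page_list(u64 *pbl, struct ib_umem *umem, unsigned long pg_sz)
{
        struct ib_block_iter biter;
        int i = 0;

        /* Walk the mapped SGL in pg_sz-aligned, pg_sz-sized steps */
        rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz)
                pbl[i++] = rdma_block_iter_dma_address(&biter);
}

Each call to __rdma_block_iter_next() hands back one aligned block, so a
driver no longer has to open-code splitting SG entries on HW page boundaries.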