IB/rxe, IB/rdmavt: Use dma_virt_ops instead of duplicating it
Make the rxe and rdmavt drivers use dma_virt_ops. Update the comments
that refer to the source files removed by this patch. Remove struct
ib_dma_mapping_ops. Remove ib_device.dma_ops.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Andrew Boyer <andrew.boyer@dell.com>
Cc: Dennis Dalessandro <dennis.dalessandro@intel.com>
Cc: Jonathan Toppins <jtoppins@redhat.com>
Cc: Alex Estrin <alex.estrin@intel.com>
Cc: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 0bbb3b7496 (parent 99db949403), committed by Doug Ledford
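
For context, a minimal sketch of the driver-side change the commit message describes: a software RDMA driver such as rxe or rdmavt stops filling in a private struct ib_dma_mapping_ops and instead points the DMA ops of the ib_device's embedded struct device at the generic dma_virt_ops before registration. The helper name below is illustrative and not taken from this patch; it assumes a kernel of this era (dma_ops field in struct device, two-argument ib_register_device) with CONFIG_DMA_VIRT_OPS enabled.

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/* Illustrative registration helper; not the exact rxe/rdmavt code. */
static int soft_rdma_register(struct ib_device *ibdev)
{
        /*
         * Route every ib_dma_*() call through the generic DMA API's
         * virtual-address implementation instead of a driver-private
         * struct ib_dma_mapping_ops table.
         */
        ibdev->dev.dma_ops = &dma_virt_ops;

        return ib_register_device(ibdev, NULL);
}

With that in place, the header-side simplification below removes the per-call branch on dev->dma_ops entirely.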
include/rdma/ib_verbs.h

@@ -1784,53 +1784,6 @@ struct ib_cache {
 	enum ib_port_state     *port_state_cache;
 };
 
-struct ib_dma_mapping_ops {
-	int		(*mapping_error)(struct ib_device *dev,
-					 u64 dma_addr);
-	u64		(*map_single)(struct ib_device *dev,
-				      void *ptr, size_t size,
-				      enum dma_data_direction direction);
-	void		(*unmap_single)(struct ib_device *dev,
-					u64 addr, size_t size,
-					enum dma_data_direction direction);
-	u64		(*map_page)(struct ib_device *dev,
-				    struct page *page, unsigned long offset,
-				    size_t size,
-				    enum dma_data_direction direction);
-	void		(*unmap_page)(struct ib_device *dev,
-				      u64 addr, size_t size,
-				      enum dma_data_direction direction);
-	int		(*map_sg)(struct ib_device *dev,
-				  struct scatterlist *sg, int nents,
-				  enum dma_data_direction direction);
-	void		(*unmap_sg)(struct ib_device *dev,
-				    struct scatterlist *sg, int nents,
-				    enum dma_data_direction direction);
-	int		(*map_sg_attrs)(struct ib_device *dev,
-					struct scatterlist *sg, int nents,
-					enum dma_data_direction direction,
-					unsigned long attrs);
-	void		(*unmap_sg_attrs)(struct ib_device *dev,
-					  struct scatterlist *sg, int nents,
-					  enum dma_data_direction direction,
-					  unsigned long attrs);
-	void		(*sync_single_for_cpu)(struct ib_device *dev,
-					       u64 dma_handle,
-					       size_t size,
-					       enum dma_data_direction dir);
-	void		(*sync_single_for_device)(struct ib_device *dev,
-						  u64 dma_handle,
-						  size_t size,
-						  enum dma_data_direction dir);
-	void		*(*alloc_coherent)(struct ib_device *dev,
-					   size_t size,
-					   u64 *dma_handle,
-					   gfp_t flag);
-	void		(*free_coherent)(struct ib_device *dev,
-					 size_t size, void *cpu_addr,
-					 u64 dma_handle);
-};
-
 struct iw_cm_verbs;
 
 struct ib_port_immutable {

@@ -2090,7 +2043,6 @@ struct ib_device {
 						struct ib_rwq_ind_table_init_attr *init_attr,
 						struct ib_udata *udata);
 	int	(*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
-	struct ib_dma_mapping_ops   *dma_ops;
 
 	struct module               *owner;
 	struct device                dev;

@@ -2965,8 +2917,6 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-	if (dev->dma_ops)
-		return dev->dma_ops->mapping_error(dev, dma_addr);
 	return dma_mapping_error(&dev->dev, dma_addr);
 }
 

@@ -2981,8 +2931,6 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
 				    void *cpu_addr, size_t size,
 				    enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
-		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
 	return dma_map_single(&dev->dev, cpu_addr, size, direction);
 }
 

@@ -2997,10 +2945,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
 				       u64 addr, size_t size,
 				       enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
-		dev->dma_ops->unmap_single(dev, addr, size, direction);
-	else
-		dma_unmap_single(&dev->dev, addr, size, direction);
+	dma_unmap_single(&dev->dev, addr, size, direction);
 }
 
 /**

@@ -3017,8 +2962,6 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
 				  size_t size,
 				  enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
-		return dev->dma_ops->map_page(dev, page, offset, size, direction);
 	return dma_map_page(&dev->dev, page, offset, size, direction);
 }
 

@@ -3033,10 +2976,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
 				     u64 addr, size_t size,
 				     enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
-		dev->dma_ops->unmap_page(dev, addr, size, direction);
-	else
-		dma_unmap_page(&dev->dev, addr, size, direction);
+	dma_unmap_page(&dev->dev, addr, size, direction);
 }
 
 /**

@@ -3050,8 +2990,6 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
 				struct scatterlist *sg, int nents,
 				enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
-		return dev->dma_ops->map_sg(dev, sg, nents, direction);
 	return dma_map_sg(&dev->dev, sg, nents, direction);
 }
 

@@ -3066,10 +3004,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction direction)
 {
-	if (dev->dma_ops)
-		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
-	else
-		dma_unmap_sg(&dev->dev, sg, nents, direction);
+	dma_unmap_sg(&dev->dev, sg, nents, direction);
 }
 
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,

@@ -3077,9 +3012,6 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 				      enum dma_data_direction direction,
 				      unsigned long dma_attrs)
 {
-	if (dev->dma_ops)
-		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
-						  dma_attrs);
 	return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 

@@ -3088,11 +3020,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 					 enum dma_data_direction direction,
 					 unsigned long dma_attrs)
 {
-	if (dev->dma_ops)
-		return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
-						    dma_attrs);
-	else
-		dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+	dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
 }
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry

@@ -3134,10 +3062,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
 					      size_t size,
 					      enum dma_data_direction dir)
 {
-	if (dev->dma_ops)
-		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
-	else
-		dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
+	dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
 }
 
 /**

@@ -3152,10 +3077,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 						 size_t size,
 						 enum dma_data_direction dir)
 {
-	if (dev->dma_ops)
-		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
-	else
-		dma_sync_single_for_device(&dev->dev, addr, size, dir);
+	dma_sync_single_for_device(&dev->dev, addr, size, dir);
 }
 
 /**

@@ -3170,14 +3092,6 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 					  dma_addr_t *dma_handle,
 					  gfp_t flag)
 {
-	if (dev->dma_ops) {
-		u64 handle;
-		void *ret;
-
-		ret = dev->dma_ops->alloc_coherent(dev, size, &handle, flag);
-		*dma_handle = handle;
-		return ret;
-	}
 	return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
 }
 

@@ -3192,10 +3106,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
 					size_t size, void *cpu_addr,
 					dma_addr_t dma_handle)
 {
-	if (dev->dma_ops)
-		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-	else
-		dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
+	dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
 }
 
 /**
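
With the dev->dma_ops branches gone, a consumer's call path is easy to follow: the ib_dma_*() wrappers always forward to the generic DMA API on &dev->dev, and for an rxe or rdmavt device that dispatches to dma_virt_ops, so the returned handle is simply the buffer's kernel virtual address. A hypothetical caller (function name and error handling are illustrative, not from this patch):

#include <rdma/ib_verbs.h>

/* Illustrative only: map a kernel buffer for a send through the
 * simplified wrappers above. */
static int example_map_for_send(struct ib_device *ibdev, void *buf,
				size_t len, u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, *dma_addr))
		return -ENOMEM;	/* no special dma_ops branch needed */
	return 0;
}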