RDMA: Restore ability to return error for destroy WQ
Make this interface symmetrical to other destroy paths.
Fixes: a49b1dc7ae ("RDMA: Convert destroy_wq to be void")
Link: https://lore.kernel.org/r/20200907120921.476363-9-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
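For context, and not part of the patch itself: a minimal sketch of what the restored contract allows a driver to do. The exm_* names and the firmware-command helper are hypothetical; only the int-returning destroy_wq callback signature and the behaviour of ib_destroy_wq_user() are taken from the diff below.

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Hypothetical firmware command, declared only for illustration. */
int exm_cmd_destroy_rq(u32 rqn);

/* Hypothetical driver object wrapping the core WQ. */
struct exm_wq {
        struct ib_wq ibwq;
        u32 rqn;
};

/*
 * Hypothetical ->destroy_wq callback under the restored int-returning
 * contract: if the hardware refuses to release the queue, the error is
 * no longer swallowed.  ib_destroy_wq_user() then skips the PD/CQ
 * refcount decrements and hands the error back to uverbs, which may
 * retry the destroy instead of leaking the object.
 */
static int exm_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
        struct exm_wq *wq = container_of(ibwq, struct exm_wq, ibwq);
        int err;

        err = exm_cmd_destroy_rq(wq->rqn);
        if (err)
                return err;     /* previously this had to be dropped */

        kfree(wq);
        return 0;
}

Such a callback is wired up through the ib_device_ops.destroy_wq member, whose prototype this patch changes from void to int.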
@@ -16,7 +16,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
                 container_of(uobject, struct ib_uwq_object, uevent.uobject);
         int ret;
 
-        ret = ib_destroy_wq(wq, &attrs->driver_udata);
+        ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
         if (ib_is_destroy_retryable(ret, why, uobject))
                 return ret;
 
@@ -2399,25 +2399,28 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
 EXPORT_SYMBOL(ib_create_wq);
 
 /**
- * ib_destroy_wq - Destroys the specified user WQ.
+ * ib_destroy_wq_user - Destroys the specified user WQ.
  * @wq: The WQ to destroy.
  * @udata: Valid user data
  */
-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
 {
         struct ib_cq *cq = wq->cq;
         struct ib_pd *pd = wq->pd;
+        int ret;
 
         if (atomic_read(&wq->usecnt))
                 return -EBUSY;
 
-        wq->device->ops.destroy_wq(wq, udata);
+        ret = wq->device->ops.destroy_wq(wq, udata);
+        if (ret)
+                return ret;
 
         atomic_dec(&pd->usecnt);
         atomic_dec(&cq->usecnt);
-        return 0;
+        return ret;
 }
-EXPORT_SYMBOL(ib_destroy_wq);
+EXPORT_SYMBOL(ib_destroy_wq_user);
 
 /**
  * ib_modify_wq - Modifies the specified WQ.
@@ -899,7 +899,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
 struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
                                 struct ib_wq_init_attr *init_attr,
                                 struct ib_udata *udata);
-void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
                       u32 wq_attr_mask, struct ib_udata *udata);
 
@@ -4327,7 +4327,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
         return err;
 }
 
-void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 {
         struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
         struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
@@ -4338,6 +4338,7 @@ void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
         destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
 
         kfree(qp);
+        return 0;
 }
 
 struct ib_rwq_ind_table
@@ -1241,7 +1241,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
                                 struct ib_wq_init_attr *init_attr,
                                 struct ib_udata *udata);
-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
                       u32 wq_attr_mask, struct ib_udata *udata);
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
@@ -5085,14 +5085,18 @@ err:
         return ERR_PTR(err);
 }
 
-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 {
         struct mlx5_ib_dev *dev = to_mdev(wq->device);
         struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+        int ret;
 
-        mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+        ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+        if (ret)
+                return ret;
         destroy_user_rq(dev, wq->pd, rwq, udata);
         kfree(rwq);
+        return 0;
 }
 
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
@@ -26,8 +26,8 @@ int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
 
 int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
 
-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
-                                  struct mlx5_core_qp *rq);
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+                                 struct mlx5_core_qp *rq);
 int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
                                 struct mlx5_core_qp *sq);
 void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
@@ -576,11 +576,12 @@ err_destroy_rq:
         return err;
 }
 
-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
-                                  struct mlx5_core_qp *rq)
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+                                 struct mlx5_core_qp *rq)
 {
         destroy_resource_common(dev, rq);
         destroy_rq_tracked(dev, rq->qpn, rq->uid);
+        return 0;
 }
 
 static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
@@ -2480,7 +2480,7 @@ struct ib_device_ops {
         struct ib_wq *(*create_wq)(struct ib_pd *pd,
                                    struct ib_wq_init_attr *init_attr,
                                    struct ib_udata *udata);
-        void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
+        int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
         int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
                          u32 wq_attr_mask, struct ib_udata *udata);
         struct ib_rwq_ind_table *(*create_rwq_ind_table)(
@@ -4316,7 +4316,7 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
 
 struct ib_wq *ib_create_wq(struct ib_pd *pd,
                            struct ib_wq_init_attr *init_attr);
-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
                  u32 wq_attr_mask);
 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
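Also for context, not part of the patch: the renamed core helper as seen from a caller. Only ib_destroy_wq_user() and its -EBUSY check on wq->usecnt come from the diff above; the surrounding helper and its reporting are an illustrative sketch.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical teardown path: the driver's verdict now reaches the
 * caller instead of being discarded, so a busy or refused WQ can be
 * reported (and the destroy retried later) rather than silently leaked.
 */
static int exm_teardown_user_wq(struct ib_wq *wq, struct ib_udata *udata)
{
        int ret;

        ret = ib_destroy_wq_user(wq, udata);
        if (ret)        /* -EBUSY while wq->usecnt != 0, or a driver error */
                pr_warn("WQ %u not destroyed (%d)\n", wq->wq_num, ret);
        return ret;
}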
|