IB: Pass uverbs_attr_bundle down ib_x destroy path
The uverbs_attr_bundle with the ucontext is sent down to the driver's ib_x destroy path as ib_udata. The next patch will use the ib_udata to free the driver's destroy path from the dependency on 'uobject->context', as we already did for the create path.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

Committed by: Jason Gunthorpe
Parent: a6a3797df2
Commit: c4367a2635
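
For context, below is a minimal sketch of what a driver-side destroy callback looks like once it receives the udata argument this series threads through. The foo_* names and the wrapper struct are invented for illustration only; the callback signature and the "udata is NULL for kernel objects" convention come from the diff hunks that follow, everything else is an assumption.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Hypothetical driver-private CQ wrapper, for illustration only. */
struct foo_cq {
	struct ib_cq ibcq;
	/* driver bookkeeping would live here */
};

static inline struct foo_cq *to_foo_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct foo_cq, ibcq);
}

/*
 * With this series the destroy handler is handed the ib_udata that
 * originated from the uverbs_attr_bundle: udata != NULL marks a user
 * object, udata == NULL a kernel object, so the driver no longer has
 * to reach into uobject->context to tell the two apart.
 */
static int foo_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct foo_cq *cq = to_foo_cq(ibcq);

	if (udata) {
		/* user-created CQ: tear down user-mapped state here */
	} else {
		/* kernel-created CQ: plain in-kernel teardown here */
	}

	kfree(cq);	/* assumes the wrapper was kzalloc()'ed at create time */
	return 0;
}
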
@@ -138,10 +138,12 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
 * rvt_destory_ah - Destory an address handle
 * @ibah: address handle
 * @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
+ * @udata: user data or NULL for kernel object
 *
 * Return: 0 on success
 */
-int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
+int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags,
+		   struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
	struct rvt_ah *ah = ibah_to_rvtah(ibah);
@@ -54,7 +54,8 @@ struct ib_ah *rvt_create_ah(struct ib_pd *pd,
			    struct rdma_ah_attr *ah_attr,
			    u32 create_flags,
			    struct ib_udata *udata);
-int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
+int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags,
+		   struct ib_udata *udata);
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
@@ -299,12 +299,13 @@ done:
/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
+ * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 *
 * Return: always 0
 */
-int rvt_destroy_cq(struct ib_cq *ibcq)
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;
@@ -55,7 +55,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_ucontext *context,
			    struct ib_udata *udata);
-int rvt_destroy_cq(struct ib_cq *ibcq);
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
@@ -548,7 +548,7 @@ bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
 *
 * Returns 0 on success.
 */
-int rvt_dereg_mr(struct ib_mr *ibmr)
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rvt_mr *mr = to_imr(ibmr);
	int ret;
@@ -575,9 +575,8 @@ out:
 *
 * Return: the memory region on success, otherwise return an errno.
 */
-struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
-			   enum ib_mr_type mr_type,
-			   u32 max_num_sg)
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			   u32 max_num_sg, struct ib_udata *udata)
{
	struct rvt_mr *mr;
@@ -78,10 +78,9 @@ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata);
-int rvt_dereg_mr(struct ib_mr *ibmr);
-struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
-			   enum ib_mr_type mr_type,
-			   u32 max_num_sg);
+int rvt_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			   u32 max_num_sg, struct ib_udata *udata);
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		  int sg_nents, unsigned int *sg_offset);
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
@@ -93,10 +93,11 @@ bail:
/**
 * rvt_dealloc_pd - Free PD
 * @ibpd: Free up PD
+ * @udata: Valid user data or NULL for kernel object
 *
 * Return: always 0
 */
-void rvt_dealloc_pd(struct ib_pd *ibpd)
+void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
@@ -52,6 +52,6 @@
int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
		 struct ib_udata *udata);
-void rvt_dealloc_pd(struct ib_pd *ibpd);
+void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);

#endif /* DEF_RDMAVTPD_H */
@@ -1617,7 +1617,7 @@ inval:
 *
 * Return: 0 on success.
 */
-int rvt_destroy_qp(struct ib_qp *ibqp)
+int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
@@ -57,7 +57,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_udata *udata);
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata);
-int rvt_destroy_qp(struct ib_qp *ibqp);
+int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr);
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
@@ -340,7 +340,7 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 *
 * Return always 0
 */
-int rvt_destroy_srq(struct ib_srq *ibsrq)
+int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
@@ -57,6 +57,6 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata);
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-int rvt_destroy_srq(struct ib_srq *ibsrq);
+int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

#endif /* DEF_RVTSRQ_H */
@@ -185,7 +185,7 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

-static void rxe_dealloc_pd(struct ib_pd *ibpd)
+static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);
@@ -242,7 +242,7 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
	return 0;
}

-static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
+static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags, struct ib_udata *udata)
{
	struct rxe_ah *ah = to_rah(ibah);
@@ -389,7 +389,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
	return 0;
}

-static int rxe_destroy_srq(struct ib_srq *ibsrq)
+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);
@@ -509,7 +509,7 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	return 0;
}

-static int rxe_destroy_qp(struct ib_qp *ibqp)
+static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);
@@ -839,7 +839,7 @@ err1:
	return ERR_PTR(err);
}

-static int rxe_destroy_cq(struct ib_cq *ibcq)
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);
@@ -990,7 +990,7 @@ err2:
	return ERR_PTR(err);
}

-static int rxe_dereg_mr(struct ib_mr *ibmr)
+static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mem *mr = to_rmr(ibmr);
@@ -1001,9 +1001,8 @@ static int rxe_dereg_mr(struct ib_mr *ibmr)
	return 0;
}

-static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
-				  enum ib_mr_type mr_type,
-				  u32 max_num_sg)
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+				  u32 max_num_sg, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);