RDMA: Handle PD allocations by IB/core
The PD allocations in IB/core allow us to simplify drivers and their error flows in their .alloc_pd() paths. The changes in .alloc_pd() go hand in hand with the relevant update in .dealloc_pd(). We use this opportunity to convert .dealloc_pd() so that it cannot fail, as was suggested a long time ago; failures are not happening, as we have never seen a WARN_ON print. Signed-off-by: Leon Romanovsky <leonro@mellanox.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Tento commit je obsažen v:

odevzdal
Jason Gunthorpe

rodič
30471d4b20
revize
21a428a019
@@ -239,6 +239,7 @@ static const struct ib_device_ops qedr_dev_ops = {
|
||||
.reg_user_mr = qedr_reg_user_mr,
|
||||
.req_notify_cq = qedr_arm_cq,
|
||||
.resize_cq = qedr_resize_cq,
|
||||
INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
|
||||
};
|
||||
|
||||
static int qedr_register_device(struct qedr_dev *dev)
|
||||
|
@@ -450,11 +450,12 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
|
||||
vma->vm_page_prot);
|
||||
}
|
||||
|
||||
struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
|
||||
struct ib_ucontext *context, struct ib_udata *udata)
|
||||
int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct ib_device *ibdev = ibpd->device;
|
||||
struct qedr_dev *dev = get_qedr_dev(ibdev);
|
||||
struct qedr_pd *pd;
|
||||
struct qedr_pd *pd = get_qedr_pd(ibpd);
|
||||
u16 pd_id;
|
||||
int rc;
|
||||
|
||||
@@ -463,16 +464,12 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
|
||||
|
||||
if (!dev->rdma_ctx) {
|
||||
DP_ERR(dev, "invalid RDMA context\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
|
||||
if (!pd)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
|
||||
if (rc)
|
||||
goto err;
|
||||
return rc;
|
||||
|
||||
pd->pd_id = pd_id;
|
||||
|
||||
@@ -485,36 +482,23 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
|
||||
if (rc) {
|
||||
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
|
||||
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
|
||||
goto err;
|
||||
return rc;
|
||||
}
|
||||
|
||||
pd->uctx = get_qedr_ucontext(context);
|
||||
pd->uctx->pd = pd;
|
||||
}
|
||||
|
||||
return &pd->ibpd;
|
||||
|
||||
err:
|
||||
kfree(pd);
|
||||
return ERR_PTR(rc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qedr_dealloc_pd(struct ib_pd *ibpd)
|
||||
void qedr_dealloc_pd(struct ib_pd *ibpd)
|
||||
{
|
||||
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
|
||||
struct qedr_pd *pd = get_qedr_pd(ibpd);
|
||||
|
||||
if (!pd) {
|
||||
pr_err("Invalid PD received in dealloc_pd\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
|
||||
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
|
||||
|
||||
kfree(pd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qedr_free_pbl(struct qedr_dev *dev,
|
||||
|
@@ -47,9 +47,9 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
|
||||
int qedr_dealloc_ucontext(struct ib_ucontext *);
|
||||
|
||||
int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
|
||||
struct ib_pd *qedr_alloc_pd(struct ib_device *,
|
||||
struct ib_ucontext *, struct ib_udata *);
|
||||
int qedr_dealloc_pd(struct ib_pd *pd);
|
||||
int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
|
||||
struct ib_udata *udata);
|
||||
void qedr_dealloc_pd(struct ib_pd *pd);
|
||||
|
||||
struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
|
||||
const struct ib_cq_init_attr *attr,
|
||||
|
Odkázat v novém úkolu
Zablokovat Uživatele