IB/qib: Use rdmavt protection domain
Remove the protection domain data structure from qib and use rdmavt's version instead.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit f44728d69a
parent eb636ac0e4
committed by Doug Ledford
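For context, a minimal sketch of the rdmavt counterparts the driver now relies on. The shape is inferred from the calls in this diff (struct rvt_pd, its user flag, and ibpd_to_rvtpd()); the authoritative definitions live in the rdmavt headers (e.g. include/rdma/rdma_vt.h), so treat this as illustrative rather than the exact upstream source:

	#include <rdma/ib_verbs.h>	/* struct ib_pd, container_of via kernel headers */

	/* Sketch only: rvt_pd wraps the core ib_pd and carries the same
	 * "user" flag that struct qib_pd used to hold. */
	struct rvt_pd {
		struct ib_pd ibpd;
		int user;	/* non-zero if created from user space */
	};

	/* Accessor used throughout the diff: recover the rvt_pd from the
	 * ib_pd handed in by the core verbs layer. */
	static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
	{
		return container_of(ibpd, struct rvt_pd, ibpd);
	}

With PD allocation handled by rdmavt, qib drops its own qib_alloc_pd()/qib_dealloc_pd(), clears the alloc_pd/dealloc_pd entries in its ib_device, and reports the PD limit from dev->rdi.dparms.props.max_pd instead of the ib_qib_max_pds module parameter, as the hunks below show.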
@@ -152,7 +152,7 @@ out:
  * Check the IB SGE for validity and initialize our internal version
  * of it.
  */
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct rvt_pd *pd,
 		struct qib_sge *isge, struct ib_sge *sge, int acc)
 {
 	struct qib_mregion *mr;
@@ -263,7 +263,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	 */
 	rcu_read_lock();
 	if (rkey == 0) {
-		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
 		struct qib_ibdev *dev = to_idev(pd->ibpd.device);
 
 		if (pd->user)
@@ -341,7 +341,7 @@ bail:
 int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr)
 {
 	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+	struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
 	struct qib_mr *mr = to_imr(wr->mr);
 	struct qib_mregion *mrg;
 	u32 key = wr->key;
@@ -99,7 +99,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
 	struct ib_mr *ret;
 	int rval;
 
-	if (to_ipd(pd)->user) {
+	if (ibpd_to_rvtpd(pd)->user) {
 		ret = ERR_PTR(-EPERM);
 		goto bail;
 	}
@@ -84,11 +84,11 @@ static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
 	int i, j, ret;
 	struct ib_wc wc;
 	struct qib_lkey_table *rkt;
-	struct qib_pd *pd;
+	struct rvt_pd *pd;
 	struct qib_sge_state *ss;
 
 	rkt = &to_idev(qp->ibqp.device)->lk_table;
-	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
 	ss = &qp->r_sge;
 	ss->sg_list = qp->r_sg_list;
 	qp->r_len = 0;
@@ -346,7 +346,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
 	int ret;
 	unsigned long flags;
 	struct qib_lkey_table *rkt;
-	struct qib_pd *pd;
+	struct rvt_pd *pd;
 	int avoid_schedule = 0;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
@@ -397,7 +397,7 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
 	}
 
 	rkt = &to_idev(qp->ibqp.device)->lk_table;
-	pd = to_ipd(qp->ibqp.pd);
+	pd = ibpd_to_rvtpd(qp->ibqp.pd);
 	wqe = get_swqe_ptr(qp, qp->s_head);
 
 	if (qp->ibqp.qp_type != IB_QPT_UC &&
@@ -1604,7 +1604,7 @@ static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
 	props->max_mr = dev->lk_table.max;
 	props->max_fmr = dev->lk_table.max;
 	props->max_map_per_fmr = 32767;
-	props->max_pd = ib_qib_max_pds;
+	props->max_pd = dev->rdi.dparms.props.max_pd;
 	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
 	props->max_qp_init_rd_atom = 255;
 	/* props->max_res_rd_atom */
@@ -1756,61 +1756,6 @@ static int qib_query_gid(struct ib_device *ibdev, u8 port,
 	return ret;
 }
 
-static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
-				  struct ib_ucontext *context,
-				  struct ib_udata *udata)
-{
-	struct qib_ibdev *dev = to_idev(ibdev);
-	struct qib_pd *pd;
-	struct ib_pd *ret;
-
-	/*
-	 * This is actually totally arbitrary. Some correctness tests
-	 * assume there's a maximum number of PDs that can be allocated.
-	 * We don't actually have this limit, but we fail the test if
-	 * we allow allocations of more than we report for this value.
-	 */
-
-	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd) {
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
-	spin_lock(&dev->n_pds_lock);
-	if (dev->n_pds_allocated == ib_qib_max_pds) {
-		spin_unlock(&dev->n_pds_lock);
-		kfree(pd);
-		ret = ERR_PTR(-ENOMEM);
-		goto bail;
-	}
-
-	dev->n_pds_allocated++;
-	spin_unlock(&dev->n_pds_lock);
-
-	/* ib_alloc_pd() will initialize pd->ibpd. */
-	pd->user = udata != NULL;
-
-	ret = &pd->ibpd;
-
-bail:
-	return ret;
-}
-
-static int qib_dealloc_pd(struct ib_pd *ibpd)
-{
-	struct qib_pd *pd = to_ipd(ibpd);
-	struct qib_ibdev *dev = to_idev(ibpd->device);
-
-	spin_lock(&dev->n_pds_lock);
-	dev->n_pds_allocated--;
-	spin_unlock(&dev->n_pds_lock);
-
-	kfree(pd);
-
-	return 0;
-}
-
 int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
 {
 	/* A multicast address requires a GRH (see ch. 8.4.1). */
@@ -2115,7 +2060,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
 
 	/* Only need to initialize non-zero fields. */
 	spin_lock_init(&dev->qpt_lock);
-	spin_lock_init(&dev->n_pds_lock);
 	spin_lock_init(&dev->n_ahs_lock);
 	spin_lock_init(&dev->n_cqs_lock);
 	spin_lock_init(&dev->n_qps_lock);
@@ -2239,8 +2183,8 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	ibdev->query_gid = qib_query_gid;
 	ibdev->alloc_ucontext = qib_alloc_ucontext;
 	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
-	ibdev->alloc_pd = qib_alloc_pd;
-	ibdev->dealloc_pd = qib_dealloc_pd;
+	ibdev->alloc_pd = NULL;
+	ibdev->dealloc_pd = NULL;
 	ibdev->create_ah = qib_create_ah;
 	ibdev->destroy_ah = qib_destroy_ah;
 	ibdev->modify_ah = qib_modify_ah;
@@ -222,12 +222,6 @@ struct qib_mcast {
 	int n_attached;
 };
 
-/* Protection domain */
-struct qib_pd {
-	struct ib_pd ibpd;
-	int user;	/* non-zero if created from user space */
-};
-
 /* Address Handle */
 struct qib_ah {
 	struct ib_ah ibah;
@@ -819,11 +813,6 @@ static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct qib_mr, ibmr);
 }
 
-static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
-{
-	return container_of(ibpd, struct qib_pd, ibpd);
-}
-
 static inline struct qib_ah *to_iah(struct ib_ah *ibah)
 {
 	return container_of(ibah, struct qib_ah, ibah);
@@ -994,7 +983,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
 
 void qib_free_lkey(struct qib_mregion *mr);
 
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
+int qib_lkey_ok(struct qib_lkey_table *rkt, struct rvt_pd *pd,
 		struct qib_sge *isge, struct ib_sge *sge, int acc);
 
 int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
|