Merge branch 'qedr' into k.o/for-next
Signed-off-by: Doug Ledford <dledford@redhat.com>
@@ -49,7 +49,7 @@
 #include "qedr.h"
 #include "verbs.h"
 #include <rdma/qedr-abi.h>
-#include "qedr_cm.h"
+#include "qedr_roce_cm.h"
 
 #define DB_ADDR_SHIFT(addr)	((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
 
@@ -70,6 +70,20 @@ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 	return 0;
 }
 
+int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+		      int index, union ib_gid *sgid)
+{
+	struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+	memset(sgid->raw, 0, sizeof(sgid->raw));
+	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
+		 sgid->global.interface_id, sgid->global.subnet_prefix);
+
+	return 0;
+}
+
 int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
 		   union ib_gid *sgid)
 {
@@ -263,8 +277,13 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
 	attr->sm_lid = 0;
 	attr->sm_sl = 0;
 	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
-	attr->gid_tbl_len = QEDR_MAX_SGID;
-	attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+		attr->gid_tbl_len = 1;
+		attr->pkey_tbl_len = 1;
+	} else {
+		attr->gid_tbl_len = QEDR_MAX_SGID;
+		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+	}
 	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
 	attr->qkey_viol_cntr = 0;
 	get_link_speed_and_width(rdma_port->link_speed,
@@ -770,7 +789,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 				       struct qedr_dev *dev,
 				       struct qedr_userq *q,
 				       u64 buf_addr, size_t buf_len,
-				       int access, int dmasync)
+				       int access, int dmasync,
+				       int alloc_and_init)
 {
 	u32 fw_pages;
 	int rc;
@@ -791,19 +811,27 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 	if (rc)
 		goto err0;
 
-	q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
-	if (IS_ERR(q->pbl_tbl)) {
-		rc = PTR_ERR(q->pbl_tbl);
-		goto err0;
-	}
-
+	if (alloc_and_init) {
+		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+		if (IS_ERR(q->pbl_tbl)) {
+			rc = PTR_ERR(q->pbl_tbl);
+			goto err0;
+		}
+		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+				   FW_PAGE_SHIFT);
+	} else {
+		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
+		if (!q->pbl_tbl) {
+			rc = -ENOMEM;
+			goto err0;
+		}
+	}
 
 	return 0;
 
 err0:
 	ib_umem_release(q->umem);
 	q->umem = NULL;
 
 	return rc;
 }
@@ -929,7 +957,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 		cq->cq_type = QEDR_CQ_TYPE_USER;
 
 		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
-					  ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+					  ureq.len, IB_ACCESS_LOCAL_WRITE,
+					  1, 1);
 		if (rc)
 			goto err0;
 
@@ -1222,18 +1251,34 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
 	return 0;
 }
 
-static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+static void qedr_copy_rq_uresp(struct qedr_dev *dev,
+			       struct qedr_create_qp_uresp *uresp,
 			       struct qedr_qp *qp)
 {
-	uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+	/* iWARP requires two doorbells per RQ. */
+	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+		uresp->rq_db_offset =
+		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+	} else {
+		uresp->rq_db_offset =
+		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+	}
+
 	uresp->rq_icid = qp->icid;
 }
 
-static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+static void qedr_copy_sq_uresp(struct qedr_dev *dev,
+			       struct qedr_create_qp_uresp *uresp,
 			       struct qedr_qp *qp)
 {
 	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
-	uresp->sq_icid = qp->icid + 1;
+
+	/* iWARP uses the same cid for rq and sq */
+	if (rdma_protocol_iwarp(&dev->ibdev, 1))
+		uresp->sq_icid = qp->icid;
+	else
+		uresp->sq_icid = qp->icid + 1;
 }
 
 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
@@ -1243,8 +1288,8 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
 	int rc;
 
 	memset(&uresp, 0, sizeof(uresp));
-	qedr_copy_sq_uresp(&uresp, qp);
-	qedr_copy_rq_uresp(&uresp, qp);
+	qedr_copy_sq_uresp(dev, &uresp, qp);
+	qedr_copy_rq_uresp(dev, &uresp, qp);
 
 	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
 	uresp.qp_id = qp->qp_id;
@@ -1264,6 +1309,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
 				      struct ib_qp_init_attr *attrs)
 {
 	spin_lock_init(&qp->q_lock);
+	atomic_set(&qp->refcnt, 1);
 	qp->pd = pd;
 	qp->qp_type = attrs->qp_type;
 	qp->max_inline_data = attrs->cap.max_inline_data;
@@ -1334,6 +1380,52 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
 		 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
 }
 
+static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
+{
+	int rc;
+
+	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+		return 0;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&dev->idr_lock);
+
+	rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
+
+	spin_unlock_irq(&dev->idr_lock);
+	idr_preload_end();
+
+	return rc < 0 ? rc : 0;
+}
+
+static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
+{
+	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+		return;
+
+	spin_lock_irq(&dev->idr_lock);
+	idr_remove(&dev->qpidr, id);
+	spin_unlock_irq(&dev->idr_lock);
+}
+
+static inline void
+qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
+			    struct qedr_qp *qp,
+			    struct qed_rdma_create_qp_out_params *out_params)
+{
+	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
+	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
+
+	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
+			   &qp->usq.pbl_info, FW_PAGE_SHIFT);
+
+	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+
+	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
+			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
+}
+
 static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
 {
 	if (qp->usq.umem)
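The qpidr table added above exists so the iWARP connection-management path can map a firmware qp_id back to its struct qedr_qp. As a rough illustration of how such a table is typically consumed (not part of this merge; qedr_iw_lookup_qp() is a hypothetical name), a lookup helper built on the same dev->idr_lock and dev->qpidr fields might look like this:

/* Illustrative sketch only -- qedr_iw_lookup_qp() is not part of this merge. */
static struct qedr_qp *qedr_iw_lookup_qp(struct qedr_dev *dev, u32 qpn)
{
	struct qedr_qp *qp;

	spin_lock_irq(&dev->idr_lock);
	qp = idr_find(&dev->qpidr, qpn);	/* NULL if the id was never added */
	if (qp)
		atomic_inc(&qp->refcnt);	/* pairs with the release in destroy_qp */
	spin_unlock_irq(&dev->idr_lock);

	return qp;
}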
@@ -1357,6 +1449,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
 	struct ib_ucontext *ib_ctx = NULL;
 	struct qedr_ucontext *ctx = NULL;
 	struct qedr_create_qp_ureq ureq;
+	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
 	int rc = -EINVAL;
 
 	ib_ctx = ibpd->uobject->context;
@@ -1371,14 +1464,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
 
 	/* SQ - read access only (0), dma sync not required (0) */
 	rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
-				  ureq.sq_len, 0, 0);
+				  ureq.sq_len, 0, 0, alloc_and_init);
 	if (rc)
 		return rc;
 
 	/* RQ - read access only (0), dma sync not required (0) */
 	rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
-				  ureq.rq_len, 0, 0);
-
+				  ureq.rq_len, 0, 0, alloc_and_init);
 	if (rc)
 		return rc;
 
@@ -1399,6 +1491,9 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
 		goto err1;
 	}
 
+	if (rdma_protocol_iwarp(&dev->ibdev, 1))
+		qedr_iwarp_populate_user_qp(dev, qp, &out_params);
+
 	qp->qp_id = out_params.qp_id;
 	qp->icid = out_params.icid;
 
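The alloc_and_init flag threaded through qedr_init_user_queue() splits PBL handling by transport: RoCE allocates and populates the PBL when the user buffer is mapped, while iWARP only reserves a placeholder and lets qedr_iwarp_populate_user_qp() fill it from the addresses that qed_rdma_create_qp() returns. A minimal caller sketch, using only names from the hunks above (example_init_sq() itself is hypothetical, not part of this merge):

/* Illustrative sketch only -- example_init_sq() is not part of this merge. */
static int example_init_sq(struct qedr_dev *dev, struct qedr_qp *qp,
			   struct ib_ucontext *ib_ctx, u64 sq_addr, size_t sq_len)
{
	/* RoCE: allocate and populate the PBL now.
	 * iWARP: kzalloc() a placeholder; it is filled later by
	 * qedr_iwarp_populate_user_qp() from qed_rdma_create_qp() output.
	 */
	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);

	/* read access only (0), no dma sync (0), as in qedr_create_user_qp() */
	return qedr_init_user_queue(ib_ctx, dev, &qp->usq, sq_addr,
				    sq_len, 0, 0, alloc_and_init);
}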
@@ -1419,6 +1514,21 @@ err1:
 	return rc;
 }
 
+static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+	qp->sq.db = dev->db_addr +
+		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+	qp->sq.db_data.data.icid = qp->icid;
+
+	qp->rq.db = dev->db_addr +
+		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+	qp->rq.db_data.data.icid = qp->icid;
+	qp->rq.iwarp_db2 = dev->db_addr +
+			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+	qp->rq.iwarp_db2_data.data.icid = qp->icid;
+	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+}
+
 static int
 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 			   struct qedr_qp *qp,
@@ -1465,8 +1575,71 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
 	qp->icid = out_params.icid;
 
 	qedr_set_roce_db_info(dev, qp);
+	return rc;
+}
 
-	return 0;
+static int
+qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
+			    struct qedr_qp *qp,
+			    struct qed_rdma_create_qp_in_params *in_params,
+			    u32 n_sq_elems, u32 n_rq_elems)
+{
+	struct qed_rdma_create_qp_out_params out_params;
+	struct qed_chain_ext_pbl ext_pbl;
+	int rc;
+
+	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
+						     QEDR_SQE_ELEMENT_SIZE,
+						     QED_CHAIN_MODE_PBL);
+	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
+						     QEDR_RQE_ELEMENT_SIZE,
+						     QED_CHAIN_MODE_PBL);
+
+	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+					      in_params, &out_params);
+
+	if (!qp->qed_qp)
+		return -EINVAL;
+
+	/* Now we allocate the chain */
+	ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
+	ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
+
+	rc = dev->ops->common->chain_alloc(dev->cdev,
+					   QED_CHAIN_USE_TO_PRODUCE,
+					   QED_CHAIN_MODE_PBL,
+					   QED_CHAIN_CNT_TYPE_U32,
+					   n_sq_elems,
+					   QEDR_SQE_ELEMENT_SIZE,
+					   &qp->sq.pbl, &ext_pbl);
+
+	if (rc)
+		goto err;
+
+	ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
+	ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
+
+	rc = dev->ops->common->chain_alloc(dev->cdev,
+					   QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+					   QED_CHAIN_MODE_PBL,
+					   QED_CHAIN_CNT_TYPE_U32,
+					   n_rq_elems,
+					   QEDR_RQE_ELEMENT_SIZE,
+					   &qp->rq.pbl, &ext_pbl);
+
+	if (rc)
+		goto err;
+
+	qp->qp_id = out_params.qp_id;
+	qp->icid = out_params.icid;
+
+	qedr_set_iwarp_db_info(dev, qp);
+	return rc;
+
+err:
+	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+
+	return rc;
 }
 
 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
@@ -1541,8 +1714,12 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
 
 	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
 
-	rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
-					n_sq_elems, n_rq_elems);
+	if (rdma_protocol_iwarp(&dev->ibdev, 1))
+		rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
+						 n_sq_elems, n_rq_elems);
+	else
+		rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
+						n_sq_elems, n_rq_elems);
 	if (rc)
 		qedr_cleanup_kernel(dev, qp);
 
@@ -1602,6 +1779,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 
 	qp->ibqp.qp_num = qp->qp_id;
 
+	rc = qedr_idr_add(dev, qp, qp->qp_id);
+	if (rc)
+		goto err;
+
 	return &qp->ibqp;
 
 err:
@@ -1689,10 +1870,13 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 		/* Update doorbell (in case post_recv was
 		 * done before move to RTR)
 		 */
-		wmb();
-		writel(qp->rq.db_data.raw, qp->rq.db);
-		/* Make sure write takes effect */
-		mmiowb();
+
+		if (rdma_protocol_roce(&dev->ibdev, 1)) {
+			wmb();
+			writel(qp->rq.db_data.raw, qp->rq.db);
+			/* Make sure write takes effect */
+			mmiowb();
+		}
 		break;
 	case QED_ROCE_QP_STATE_ERR:
 		break;
@@ -1786,16 +1970,18 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	else
 		new_qp_state = old_qp_state;
 
-	if (!ib_modify_qp_is_ok
-	    (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
-	     IB_LINK_LAYER_ETHERNET)) {
-		DP_ERR(dev,
-		       "modify qp: invalid attribute mask=0x%x specified for\n"
-		       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
-		       attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
-		       new_qp_state);
-		rc = -EINVAL;
-		goto err;
+	if (rdma_protocol_roce(&dev->ibdev, 1)) {
+		if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
+					ibqp->qp_type, attr_mask,
+					IB_LINK_LAYER_ETHERNET)) {
+			DP_ERR(dev,
+			       "modify qp: invalid attribute mask=0x%x specified for\n"
+			       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+			       attr_mask, qp->qp_id, ibqp->qp_type,
+			       old_qp_state, new_qp_state);
+			rc = -EINVAL;
+			goto err;
+		}
 	}
 
 	/* Translate the masks... */
@@ -2111,15 +2297,34 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
 	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
 		 qp, qp->qp_type);
 
-	if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
-	    (qp->state != QED_ROCE_QP_STATE_ERR) &&
-	    (qp->state != QED_ROCE_QP_STATE_INIT)) {
+	if (rdma_protocol_roce(&dev->ibdev, 1)) {
+		if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
+		    (qp->state != QED_ROCE_QP_STATE_INIT)) {
 
-		attr.qp_state = IB_QPS_ERR;
-		attr_mask |= IB_QP_STATE;
+			attr.qp_state = IB_QPS_ERR;
+			attr_mask |= IB_QP_STATE;
 
-		/* Change the QP state to ERROR */
-		qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+			/* Change the QP state to ERROR */
+			qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+		}
+	} else {
+		/* Wait for the connect/accept to complete */
+		if (qp->ep) {
+			int wait_count = 1;
+
+			while (qp->ep->during_connect) {
+				DP_DEBUG(dev, QEDR_MSG_QP,
+					 "Still in during connect/accept\n");
+
+				msleep(100);
+				if (wait_count++ > 200) {
+					DP_NOTICE(dev,
+						  "during connect timeout\n");
+					break;
+				}
+			}
+		}
+	}
 
 	if (qp->qp_type == IB_QPT_GSI)
@@ -2127,8 +2332,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
 
 	qedr_free_qp_resources(dev, qp);
 
-	kfree(qp);
-
+	if (atomic_dec_and_test(&qp->refcnt)) {
+		qedr_idr_remove(dev, qp->qp_id);
+		kfree(qp);
+	}
 	return rc;
 }
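The destroy path above drops the reference taken at creation time (atomic_set(&qp->refcnt, 1) earlier in this diff) and, on the last put, removes the qpidr entry before freeing the QP. As a hedged sketch of the matching release helper an iWARP event-handler path might use with the lookup shown earlier (qedr_iw_qp_rele() is a hypothetical name, not an API from this merge):

/* Illustrative sketch only -- qedr_iw_qp_rele() is not part of this merge. */
static void qedr_iw_qp_rele(struct qedr_dev *dev, struct qedr_qp *qp)
{
	/* Mirrors the tail of qedr_destroy_qp() above: the final reference
	 * removes the qpidr entry and frees the QP.
	 */
	if (atomic_dec_and_test(&qp->refcnt)) {
		qedr_idr_remove(dev, qp->qp_id);
		kfree(qp);
	}
}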
@@ -2740,6 +2947,7 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 	case IB_WR_SEND_WITH_INV:
 		return IB_WC_SEND;
 	case IB_WR_RDMA_READ:
+	case IB_WR_RDMA_READ_WITH_INV:
 		return IB_WC_RDMA_READ;
 	case IB_WR_ATOMIC_CMP_AND_SWP:
 		return IB_WC_COMP_SWAP;
@@ -2900,11 +3108,8 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
 		break;
 	case IB_WR_RDMA_READ_WITH_INV:
-		DP_ERR(dev,
-		       "RDMA READ WITH INVALIDATE not supported\n");
-		*bad_wr = wr;
-		rc = -EINVAL;
-		break;
+		SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
+		/* fallthrough... same is identical to RDMA READ */
 
 	case IB_WR_RDMA_READ:
 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
@@ -3014,15 +3219,17 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 	spin_lock_irqsave(&qp->q_lock, flags);
 
-	if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
-	    (qp->state != QED_ROCE_QP_STATE_ERR) &&
-	    (qp->state != QED_ROCE_QP_STATE_SQD)) {
-		spin_unlock_irqrestore(&qp->q_lock, flags);
-		*bad_wr = wr;
-		DP_DEBUG(dev, QEDR_MSG_CQ,
-			 "QP in wrong state! QP icid=0x%x state %d\n",
-			 qp->icid, qp->state);
-		return -EINVAL;
+	if (rdma_protocol_roce(&dev->ibdev, 1)) {
+		if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
+		    (qp->state != QED_ROCE_QP_STATE_SQD)) {
+			spin_unlock_irqrestore(&qp->q_lock, flags);
+			*bad_wr = wr;
+			DP_DEBUG(dev, QEDR_MSG_CQ,
+				 "QP in wrong state! QP icid=0x%x state %d\n",
+				 qp->icid, qp->state);
+			return -EINVAL;
+		}
 	}
 
 	while (wr) {
@@ -3142,6 +3349,11 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		/* Make sure write sticks */
 		mmiowb();
 
+		if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+			writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
+			mmiowb();	/* for second doorbell */
+		}
+
 		wr = wr->next;
 	}
 
@@ -3603,23 +3815,3 @@ int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
 		 mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
 	return IB_MAD_RESULT_SUCCESS;
 }
-
-int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
-			struct ib_port_immutable *immutable)
-{
-	struct ib_port_attr attr;
-	int err;
-
-	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
-				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
-
-	err = ib_query_port(ibdev, port_num, &attr);
-	if (err)
-		return err;
-
-	immutable->pkey_tbl_len = attr.pkey_tbl_len;
-	immutable->gid_tbl_len = attr.gid_tbl_len;
-	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
-	return 0;
-}