RDMA/cxgb4: Fix QP flush logic
This patch makes the following fixes in the QP flush logic:

- Correctly flush unsignaled WRs followed by a signaled WR.
- Support flushing a CQ bound to multiple QPs.
- Reset cidx_flush if an active queue starts getting HW CQEs again.
- Mark the WQ in error when we leave RTS. This was only being done for
  user queues, but we need it for kernel queues too so that
  post_send()/post_recv() will start returning the appropriate error
  synchronously.
- Eat unsignaled read response CQEs. The HW always inserts CQEs, so we
  must silently discard them if the read work request was unsignaled.
- Handle QP flushes with pending SW CQEs. The flush and out-of-order
  completion logic had a bug: if out-of-order completions were flushed
  but not yet polled by the consumer, and the QP was then flushed, we
  ended up inserting duplicate completions.
- Make c4iw_flush_sq() flush only WRs that have not already been
  flushed. Since we already track how far into the SQ we have flushed
  via sq.cidx_flush, just start at that point and flush any remaining
  WRs. This bug only caused a problem in the presence of unsignaled
  work requests.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Vipul Pandya <vipul@chelsio.com>

[ Fixed sparse warning due to htonl/ntohl confusion.  - Roland ]

Signed-off-by: Roland Dreier <roland@purestorage.com>
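The last bullet is the core mechanism: a flush cursor records how far the SQ
has already been flushed, so a repeated flush only covers the remainder and
never re-inserts completions. Below is a minimal userspace sketch of that
idea; SQ_SIZE, the struct layouts, and flush_sq() are illustrative
assumptions, not the driver's definitions (note the driver field added by
this patch is named flush_cidx, while the commit message calls it
cidx_flush).

#include <stdio.h>

#define SQ_SIZE 8                               /* ring size, sketch only */

struct sw_sqe {
        unsigned long long wr_id;               /* consumer's work request id */
        int signaled;                           /* did the WR ask for a CQE? */
        int flushed;                            /* set once a flush CQE is queued */
};

struct sq {
        struct sw_sqe queue[SQ_SIZE];
        int cidx;                               /* consumer index */
        int pidx;                               /* producer index */
        int flush_cidx;                         /* -1 until the first flush */
};

static void flush_sq(struct sq *sq)
{
        int idx;

        if (sq->flush_cidx == -1)               /* first flush starts at cidx */
                sq->flush_cidx = sq->cidx;

        /* walk only the not-yet-flushed tail of the ring */
        for (idx = sq->flush_cidx; idx != sq->pidx; idx = (idx + 1) % SQ_SIZE) {
                struct sw_sqe *swsqe = &sq->queue[idx];

                if (!swsqe->flushed) {
                        swsqe->flushed = 1;
                        printf("flush CQE for wr_id %llu\n", swsqe->wr_id);
                }
        }
        sq->flush_cidx = sq->pidx;              /* nothing left for next time */
}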
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -737,6 +737,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		swsqe->idx = qhp->wq.sq.pidx;
 		swsqe->complete = 0;
 		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+		swsqe->flushed = 0;
 		swsqe->wr_id = wr->wr_id;
 
 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1006,7 +1007,15 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&rchp->lock, flag);
 	spin_lock(&qhp->lock);
-	c4iw_flush_hw_cq(&rchp->cq);
+
+	if (qhp->wq.flushed) {
+		spin_unlock(&qhp->lock);
+		spin_unlock_irqrestore(&rchp->lock, flag);
+		return;
+	}
+	qhp->wq.flushed = 1;
+
+	c4iw_flush_hw_cq(rchp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
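The hunk above adds an idempotence guard: now that one CQ can be bound to
multiple QPs, a QP may see a second flush attempt and must bail out instead
of inserting duplicate flush completions. A pthread-based sketch of the
pattern follows; the struct and lock names are simplified stand-ins for the
cxgb4 types, not the driver code.

#include <pthread.h>

struct cq { pthread_mutex_t lock; };
struct qp { pthread_mutex_t lock; int flushed; };

static void flush_qp_once(struct qp *qp, struct cq *cq)
{
        /* locking hierarchy: cq lock first, then qp lock */
        pthread_mutex_lock(&cq->lock);
        pthread_mutex_lock(&qp->lock);

        if (qp->flushed) {                      /* someone already flushed us */
                pthread_mutex_unlock(&qp->lock);
                pthread_mutex_unlock(&cq->lock);
                return;
        }
        qp->flushed = 1;                        /* latch before doing the work */

        /* ... drain HW CQEs and insert SW flush completions here ... */

        pthread_mutex_unlock(&qp->lock);
        pthread_mutex_unlock(&cq->lock);
}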
@@ -1020,9 +1029,9 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, flag);
 	spin_lock(&qhp->lock);
-	c4iw_flush_hw_cq(&schp->cq);
-	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
-	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+	if (schp != rchp)
+		c4iw_flush_hw_cq(schp);
+	flushed = c4iw_flush_sq(qhp);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, flag);
 	if (flushed) {
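When the send and receive queues share one CQ, its HW CQEs were already
drained by the receive-side flush, hence the new schp != rchp test; and
c4iw_flush_sq() now takes only the QP, since flush progress is tracked per
QP. A simplified sketch of that control flow, with illustrative stand-in
types and helpers:

struct cq { int id; };
struct qp { struct cq *scq, *rcq; };

static void flush_hw_cq(struct cq *cq)
{
        (void)cq;       /* ... move HW CQEs into the SW queue ... */
}

static void flush_sq_wrs(struct qp *qp)
{
        (void)qp;       /* ... insert flush completions for pending SQ WRs ... */
}

static void flush_send_side(struct qp *qp)
{
        /* a shared CQ was already drained by the receive-side flush */
        if (qp->scq != qp->rcq)
                flush_hw_cq(qp->scq);
        flush_sq_wrs(qp);
}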
@@ -1037,11 +1046,11 @@ static void flush_qp(struct c4iw_qp *qhp)
 	struct c4iw_cq *rchp, *schp;
 	unsigned long flag;
 
-	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
-	schp = get_chp(qhp->rhp, qhp->attr.scq);
+	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+	schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
+	t4_set_wq_in_error(&qhp->wq);
 	if (qhp->ibqp.uobject) {
-		t4_set_wq_in_error(&qhp->wq);
 		t4_set_cq_in_error(&rchp->cq);
 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
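Here, and in the three c4iw_modify_qp() hunks below, t4_set_wq_in_error() is
hoisted out of the uobject check so that kernel QPs, not just user QPs, are
marked in error when the QP leaves RTS. That is what lets post_send() and
post_recv() fail new work requests synchronously. A rough userspace model of
the behavior; struct wq and its flag are illustrative, though returning
-EINVAL matches what c4iw_post_send() did for an in-error WQ at the time of
this patch.

#include <errno.h>

struct wq { int in_error; };            /* stand-in for the t4 WQ error flag */

static void set_wq_in_error(struct wq *wq)
{
        wq->in_error = 1;               /* models t4_set_wq_in_error() */
}

static int post_send(struct wq *wq)
{
        if (wq->in_error)               /* QP left RTS: fail synchronously */
                return -EINVAL;
        /* ... build the WR and ring the doorbell ... */
        return 0;
}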
@@ -1330,8 +1339,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 				disconnect = 1;
 				c4iw_get_ep(&qhp->ep->com);
 			}
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			ret = rdma_fini(rhp, qhp, ep);
 			if (ret)
 				goto err;
@@ -1340,8 +1348,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			set_state(qhp, C4IW_QP_STATE_TERMINATE);
 			qhp->attr.layer_etype = attrs->layer_etype;
 			qhp->attr.ecode = attrs->ecode;
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			ep = qhp->ep;
 			if (!internal)
 				terminate = 1;
@@ -1350,8 +1357,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			break;
 		case C4IW_QP_STATE_ERROR:
 			set_state(qhp, C4IW_QP_STATE_ERROR);
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			if (!internal) {
 				abort = 1;
 				disconnect = 1;
@@ -1552,12 +1558,12 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 
 	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
 
-
 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
 	if (!qhp)
 		return ERR_PTR(-ENOMEM);
 	qhp->wq.sq.size = sqsize;
 	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+	qhp->wq.sq.flush_cidx = -1;
 	qhp->wq.rq.size = rqsize;
 	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
 