IB/hfi1: Enable TID RDMA WRITE protocol
This patch enables TID RDMA WRITE protocol by converting a qualified RDMA WRITE request into a TID RDMA WRITE request internally: (1) The TID RDMA capability must be enabled; (2) The request must start on a 4K page boundary; (3) The request length must be a multiple of 4K and must be larger than or equal to 256K. Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com> Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com> Signed-off-by: Kaike Wan <kaike.wan@intel.com> Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
@@ -3322,6 +3322,18 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
|
|||||||
new_opcode = IB_WR_TID_RDMA_READ;
|
new_opcode = IB_WR_TID_RDMA_READ;
|
||||||
do_tid_rdma = true;
|
do_tid_rdma = true;
|
||||||
}
|
}
|
||||||
|
} else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
|
||||||
|
/*
|
||||||
|
* TID RDMA is enabled for this RDMA WRITE request iff:
|
||||||
|
* 1. The remote address is page-aligned,
|
||||||
|
* 2. The length is larger than the minimum segment size,
|
||||||
|
* 3. The length is page-multiple.
|
||||||
|
*/
|
||||||
|
if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
|
||||||
|
!(wqe->length & ~PAGE_MASK)) {
|
||||||
|
new_opcode = IB_WR_TID_RDMA_WRITE;
|
||||||
|
do_tid_rdma = true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (do_tid_rdma) {
|
if (do_tid_rdma) {
|
||||||
@@ -3338,12 +3350,22 @@ void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
|
|||||||
priv->tid_req.n_flows = remote->max_read;
|
priv->tid_req.n_flows = remote->max_read;
|
||||||
qpriv->tid_r_reqs++;
|
qpriv->tid_r_reqs++;
|
||||||
wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
|
wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
|
||||||
|
} else {
|
||||||
|
wqe->lpsn += priv->tid_req.total_segs - 1;
|
||||||
|
atomic_inc(&qpriv->n_requests);
|
||||||
}
|
}
|
||||||
|
|
||||||
priv->tid_req.cur_seg = 0;
|
priv->tid_req.cur_seg = 0;
|
||||||
priv->tid_req.comp_seg = 0;
|
priv->tid_req.comp_seg = 0;
|
||||||
priv->tid_req.ack_seg = 0;
|
priv->tid_req.ack_seg = 0;
|
||||||
priv->tid_req.state = TID_REQUEST_INACTIVE;
|
priv->tid_req.state = TID_REQUEST_INACTIVE;
|
||||||
|
/*
|
||||||
|
* Reset acked_tail.
|
||||||
|
* TID RDMA READ does not have ACKs so it does not
|
||||||
|
* update the pointer. We have to reset it so TID RDMA
|
||||||
|
* WRITE does not get confused.
|
||||||
|
*/
|
||||||
|
priv->tid_req.acked_tail = priv->tid_req.setup_head;
|
||||||
trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
|
trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
|
||||||
wqe->psn, wqe->lpsn,
|
wqe->psn, wqe->lpsn,
|
||||||
&priv->tid_req);
|
&priv->tid_req);
|
||||||
|
@@ -266,7 +266,8 @@ static inline void hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp,
|
|||||||
struct rvt_swqe *wqe)
|
struct rvt_swqe *wqe)
|
||||||
{
|
{
|
||||||
if (wqe->priv &&
|
if (wqe->priv &&
|
||||||
wqe->wr.opcode == IB_WR_RDMA_READ &&
|
(wqe->wr.opcode == IB_WR_RDMA_READ ||
|
||||||
|
wqe->wr.opcode == IB_WR_RDMA_WRITE) &&
|
||||||
wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
|
wqe->length >= TID_RDMA_MIN_SEGMENT_SIZE)
|
||||||
setup_tid_rdma_wqe(qp, wqe);
|
setup_tid_rdma_wqe(qp, wqe);
|
||||||
}
|
}
|
||||||
|
Reference in New Issue
Block a user