IB/{hfi1, qib, rdmavt}: Move copy SGE logic into rdmavt
This patch moves hfi1_copy_sge() into rdmavt for sharing with qib. This
patch also moves all the wss_*() functions into rdmavt, as several
wss_*() functions are called from hfi1_copy_sge().

When SGE copy mode is adaptive, a cacheless copy may be done in some
cases for performance reasons. In those cases, the x86 cacheless copy
function is called, since the drivers that use rdmavt and may set SGE
copy mode to adaptive are x86 only. For this reason, this patch adds
"depends on X86_64" to rdmavt/Kconfig.

Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
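For context, the qib call sites below, together with the prototype removed from qib_verbs.h, suggest the shape of the change at each call site. This is inferred from the diff itself, not quoted from the rdmavt header, and the rvt_copy_sge() parameter names are assumptions:

	/* Old, driver-local helper (prototype removed from qib_verbs.h below);
	 * release was an int flag. */
	void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
			  int release);

	/* Shared rdmavt helper as used by the converted call sites: it also
	 * takes the QP (so device-wide SGE copy-mode state is reachable) and
	 * two bool flags, release plus what appears to be a "copy last" hint.
	 * Parameter names are guesses based on the call sites. */
	void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
			  void *data, u32 length, bool release, bool copy_last);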
@@ -1425,7 +1425,8 @@ read_middle:
 		qp->s_rdma_read_len -= pmtu;
 		update_last_psn(qp, psn);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
-		qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+			     data, pmtu, false, false);
 		goto bail;
 
 	case OP(RDMA_READ_RESPONSE_ONLY):
@@ -1471,7 +1472,8 @@ read_last:
 		if (unlikely(tlen != qp->s_rdma_read_len))
 			goto ack_len_err;
 		aeth = be32_to_cpu(ohdr->u.aeth);
-		qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+			     data, tlen, false, false);
 		WARN_ON(qp->s_rdma_read_sge.num_sge);
 		(void) do_rc_ack(qp, aeth, psn,
 				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
@@ -1844,7 +1846,7 @@ send_middle:
 		qp->r_rcv_len += pmtu;
 		if (unlikely(qp->r_rcv_len > qp->r_len))
 			goto nack_inv;
-		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
 		break;
 
 	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -1890,7 +1892,7 @@ send_last:
 		wc.byte_len = tlen + qp->r_rcv_len;
 		if (unlikely(wc.byte_len > qp->r_len))
 			goto nack_inv;
-		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
 		rvt_put_ss(&qp->r_sge);
 		qp->r_msn++;
 		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
@@ -354,7 +354,7 @@ again:
 		if (len > sge->sge_length)
 			len = sge->sge_length;
 		BUG_ON(len == 0);
-		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
+		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, release, false);
 		sge->vaddr += len;
 		sge->length -= len;
 		sge->sge_length -= len;
@@ -359,7 +359,7 @@ send_first:
 		qp->r_rcv_len += pmtu;
 		if (unlikely(qp->r_rcv_len > qp->r_len))
 			goto rewind;
-		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
 		break;
 
 	case OP(SEND_LAST_WITH_IMMEDIATE):
@@ -385,7 +385,7 @@ send_last:
 		if (unlikely(wc.byte_len > qp->r_len))
 			goto rewind;
 		wc.opcode = IB_WC_RECV;
-		qib_copy_sge(&qp->r_sge, data, tlen, 0);
+		rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
 		rvt_put_ss(&qp->s_rdma_read_sge);
 last_imm:
 		wc.wr_id = qp->r_wr_id;
@@ -449,7 +449,7 @@ rdma_first:
 		qp->r_rcv_len += pmtu;
 		if (unlikely(qp->r_rcv_len > qp->r_len))
 			goto drop;
-		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
 		break;
 
 	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -479,7 +479,7 @@ rdma_last_imm:
 		}
 		wc.byte_len = qp->r_len;
 		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
 		rvt_put_ss(&qp->r_sge);
 		goto last_imm;
 
@@ -495,7 +495,7 @@ rdma_last:
 		tlen -= (hdrsize + pad + 4);
 		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
 			goto drop;
-		qib_copy_sge(&qp->r_sge, data, tlen, 1);
+		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
 		rvt_put_ss(&qp->r_sge);
 		break;
 
@@ -162,8 +162,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 			const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
 
 			qib_make_grh(ibp, &grh, grd, 0, 0);
-			qib_copy_sge(&qp->r_sge, &grh,
-				     sizeof(grh), 1);
+			rvt_copy_sge(qp, &qp->r_sge, &grh,
+				     sizeof(grh), true, false);
 			wc.wc_flags |= IB_WC_GRH;
 		} else
 			rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
@@ -179,7 +179,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 		if (len > sge->sge_length)
 			len = sge->sge_length;
 		BUG_ON(len == 0);
-		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
 		sge->vaddr += len;
 		sge->length -= len;
 		sge->sge_length -= len;
@@ -551,12 +551,13 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 			goto drop;
 	}
 	if (has_grh) {
-		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
-			     sizeof(struct ib_grh), 1);
+		rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
+			     sizeof(struct ib_grh), true, false);
 		wc.wc_flags |= IB_WC_GRH;
 	} else
 		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
-	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+	rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+		     true, false);
 	rvt_put_ss(&qp->r_sge);
 	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 		return;
@@ -131,27 +131,6 @@ const enum ib_wc_opcode ib_qib_wc_opcode[] = {
  */
 __be64 ib_qib_sys_image_guid;
 
-/**
- * qib_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
-{
-	struct rvt_sge *sge = &ss->sge;
-
-	while (length) {
-		u32 len = rvt_get_sge_length(sge, length);
-
-		WARN_ON_ONCE(len == 0);
-		memcpy(sge->vaddr, data, len);
-		rvt_update_sge(ss, len, release);
-		data += len;
-		length -= len;
-	}
-}
-
 /*
  * Count the number of DMA descriptors needed to send length bytes of data.
  * Don't modify the qib_sge_state to get the count.
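The function removed above is a plain per-SGE memcpy loop. Per the commit message, the shared rvt_copy_sge() keeps this loop and layers the adaptive decision on top: when the device's SGE copy mode is adaptive, the wss_*() working-set heuristic decides whether to use the x86 cacheless (non-temporal) copy instead of memcpy(). A rough sketch of that shape follows; rvt_wss_wants_cacheless(), rvt_cacheless_copy() and RVT_SGE_COPY_ADAPTIVE are illustrative names assumed for this sketch, not quoted from the rdmavt source:

	/* Sketch only -- helper and constant names are placeholders. */
	static void rvt_copy_sge_sketch(struct rvt_qp *qp, struct rvt_sge_state *ss,
					void *data, u32 length, bool release)
	{
		struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
		struct rvt_sge *sge = &ss->sge;
		bool cacheless = false;

		if (rdi->dparms.sge_copy_mode == RVT_SGE_COPY_ADAPTIVE)
			/* wss_*() heuristic: is the destination likely cache-cold? */
			cacheless = rvt_wss_wants_cacheless(rdi, length);

		while (length) {
			u32 len = rvt_get_sge_length(sge, length);

			if (cacheless)
				rvt_cacheless_copy(sge->vaddr, data, len); /* x86 non-temporal copy */
			else
				memcpy(sge->vaddr, data, len);
			rvt_update_sge(ss, len, release);
			data += len;
			length -= len;
		}
	}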
@@ -1631,6 +1610,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
 	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
 	dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
+	dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
 
 	qib_fill_device_attr(dd);
 
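qib keeps its existing behaviour by selecting the plain-memcpy mode here; the adaptive mode described in the commit message is what an x86-only driver such as hfi1 would select, and it is that path which pulls in the x86 cacheless copy (hence the "depends on X86_64" added to rdmavt/Kconfig). A minimal illustration of the per-driver choice, where RVT_SGE_COPY_ADAPTIVE is an assumed name for the adaptive mode:

	/* qib (this patch): always plain memcpy */
	dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;

	/* illustrative alternative: opt in to the adaptive cacheless-copy path */
	dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_ADAPTIVE;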
@@ -292,9 +292,6 @@ void qib_put_txreq(struct qib_verbs_txreq *tx);
 int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
 		   u32 hdrwords, struct rvt_sge_state *ss, u32 len);
 
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
-		  int release);
-
 void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 		int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
 