erdma_qp.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <[email protected]> */
/*          Kai Shen <[email protected]> */
/* Copyright (c) 2020-2021, Alibaba Group */

/* Authors: Bernard Metzler <[email protected]> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include "erdma_cm.h"
#include "erdma_verbs.h"

void erdma_qp_llp_close(struct erdma_qp *qp)
{
	struct erdma_qp_attrs qp_attrs;

	down_write(&qp->state_lock);

	switch (qp->attrs.state) {
	case ERDMA_QP_STATE_RTS:
	case ERDMA_QP_STATE_RTR:
	case ERDMA_QP_STATE_IDLE:
	case ERDMA_QP_STATE_TERMINATE:
		qp_attrs.state = ERDMA_QP_STATE_CLOSING;
		erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
		break;
	case ERDMA_QP_STATE_CLOSING:
		qp->attrs.state = ERDMA_QP_STATE_IDLE;
		break;
	default:
		break;
	}

	if (qp->cep) {
		erdma_cep_put(qp->cep);
		qp->cep = NULL;
	}

	up_write(&qp->state_lock);
}

struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
{
	struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);

	if (qp)
		return &qp->ibqp;

	return NULL;
}

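/*
 * Move the QP to RTS: collect the TCP 4-tuple and sequence numbers of the
 * connected socket and pass them to the device in a MODIFY_QP command, so
 * the device can attach to the established connection.
 */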
static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
					struct erdma_qp_attrs *attrs,
					enum erdma_qp_attr_mask mask)
{
	int ret;
	struct erdma_dev *dev = qp->dev;
	struct erdma_cmdq_modify_qp_req req;
	struct tcp_sock *tp;
	struct erdma_cep *cep = qp->cep;
	struct sockaddr_storage local_addr, remote_addr;

	if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
		return -EINVAL;

	if (!(mask & ERDMA_QP_ATTR_MPA))
		return -EINVAL;

	ret = getname_local(cep->sock, &local_addr);
	if (ret < 0)
		return ret;

	ret = getname_peer(cep->sock, &remote_addr);
	if (ret < 0)
		return ret;

	qp->attrs.state = ERDMA_QP_STATE_RTS;

	tp = tcp_sk(qp->cep->sock->sk);

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_MODIFY_QP);

	req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));

	req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
	req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
	req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
	req.dport = to_sockaddr_in(remote_addr).sin_port;
	req.sport = to_sockaddr_in(local_addr).sin_port;

	req.send_nxt = tp->snd_nxt;
	/* rsvd tcp seq for mpa-rsp in server. */
	if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
		req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
	req.recv_nxt = tp->rcv_nxt;

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}

static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
					 struct erdma_qp_attrs *attrs,
					 enum erdma_qp_attr_mask mask)
{
	struct erdma_dev *dev = qp->dev;
	struct erdma_cmdq_modify_qp_req req;

	qp->attrs.state = attrs->state;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_MODIFY_QP);

	req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}

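/*
 * Core QP state machine. Only state transitions (ERDMA_QP_ATTR_STATE) are
 * handled here; callers such as erdma_qp_llp_close() invoke it with
 * qp->state_lock held.
 */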
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
			     enum erdma_qp_attr_mask mask)
{
	int drop_conn, ret = 0;

	if (!mask)
		return 0;

	if (!(mask & ERDMA_QP_ATTR_STATE))
		return 0;

	switch (qp->attrs.state) {
	case ERDMA_QP_STATE_IDLE:
	case ERDMA_QP_STATE_RTR:
		if (attrs->state == ERDMA_QP_STATE_RTS) {
			ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
			if (qp->cep) {
				erdma_cep_put(qp->cep);
				qp->cep = NULL;
			}
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
		}
		break;
	case ERDMA_QP_STATE_RTS:
		drop_conn = 0;

		if (attrs->state == ERDMA_QP_STATE_CLOSING) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			drop_conn = 1;
		} else if (attrs->state == ERDMA_QP_STATE_TERMINATE) {
			qp->attrs.state = ERDMA_QP_STATE_TERMINATE;
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			drop_conn = 1;
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
			drop_conn = 1;
		}

		if (drop_conn)
			erdma_qp_cm_drop(qp);

		break;
	case ERDMA_QP_STATE_TERMINATE:
		if (attrs->state == ERDMA_QP_STATE_ERROR)
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
		break;
	case ERDMA_QP_STATE_CLOSING:
		if (attrs->state == ERDMA_QP_STATE_IDLE) {
			qp->attrs.state = ERDMA_QP_STATE_IDLE;
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
		} else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
			return -ECONNABORTED;
		}
		break;
	default:
		break;
	}

	return ret;
}

static void erdma_qp_safe_free(struct kref *ref)
{
	struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);

	complete(&qp->safe_free);
}

void erdma_qp_put(struct erdma_qp *qp)
{
	WARN_ON(kref_read(&qp->ref) < 1);
	kref_put(&qp->ref, erdma_qp_safe_free);
}

void erdma_qp_get(struct erdma_qp *qp)
{
	kref_get(&qp->ref);
}

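/*
 * Copy the payload referenced by the WR's SG list directly into the SQE
 * (inline data), wrapping across SQEBBs as needed. Returns the number of
 * bytes copied, or -EINVAL if the total exceeds ERDMA_MAX_INLINE.
 */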
static int fill_inline_data(struct erdma_qp *qp,
			    const struct ib_send_wr *send_wr, u16 wqe_idx,
			    u32 sgl_offset, __le32 *length_field)
{
	u32 remain_size, copy_size, data_off, bytes = 0;
	char *data;
	int i = 0;

	wqe_idx += (sgl_offset >> SQEBB_SHIFT);
	sgl_offset &= (SQEBB_SIZE - 1);
	data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
			       SQEBB_SHIFT);

	while (i < send_wr->num_sge) {
		bytes += send_wr->sg_list[i].length;
		if (bytes > (int)ERDMA_MAX_INLINE)
			return -EINVAL;

		remain_size = send_wr->sg_list[i].length;
		data_off = 0;

		while (1) {
			copy_size = min(remain_size, SQEBB_SIZE - sgl_offset);

			memcpy(data + sgl_offset,
			       (void *)(uintptr_t)send_wr->sg_list[i].addr +
				       data_off,
			       copy_size);
			remain_size -= copy_size;
			data_off += copy_size;
			sgl_offset += copy_size;
			wqe_idx += (sgl_offset >> SQEBB_SHIFT);
			sgl_offset &= (SQEBB_SIZE - 1);

			data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
					       qp->attrs.sq_size, SQEBB_SHIFT);
			if (!remain_size)
				break;
		}

		i++;
	}
	*length_field = cpu_to_le32(bytes);

	return bytes;
}

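/*
 * Copy the ib_sge descriptors themselves (not the payload) into the SQE's
 * SGL area, wrapping across SQEBBs as needed. The accumulated data length
 * is written to the SQE's length field.
 */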
static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
		    u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
{
	int i = 0;
	u32 bytes = 0;
	char *sgl;

	if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
		return -EINVAL;

	if (sgl_offset & 0xF)
		return -EINVAL;

	while (i < send_wr->num_sge) {
		wqe_idx += (sgl_offset >> SQEBB_SHIFT);
		sgl_offset &= (SQEBB_SIZE - 1);
		sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
				      qp->attrs.sq_size, SQEBB_SHIFT);

		bytes += send_wr->sg_list[i].length;
		memcpy(sgl + sgl_offset, &send_wr->sg_list[i],
		       sizeof(struct ib_sge));

		sgl_offset += sizeof(struct ib_sge);
		i++;
	}

	*length_field = cpu_to_le32(bytes);
	return 0;
}

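/*
 * Translate one ib_send_wr into an SQE at producer index *pi, and advance
 * *pi by the number of SQEBBs the SQE occupies.
 */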
static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
			      const struct ib_send_wr *send_wr)
{
	u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
	u32 idx = *pi & (qp->attrs.sq_size - 1);
	enum ib_wr_opcode op = send_wr->opcode;
	struct erdma_readreq_sqe *read_sqe;
	struct erdma_reg_mr_sqe *regmr_sge;
	struct erdma_write_sqe *write_sqe;
	struct erdma_send_sqe *send_sqe;
	struct ib_rdma_wr *rdma_wr;
	struct erdma_mr *mr;
	__le32 *length_field;
	u64 wqe_hdr, *entry;
	struct ib_sge *sge;
	u32 attrs;
	int ret;

	entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
				SQEBB_SHIFT);

	/* Clear the SQE header section. */
	*entry = 0;

	qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
	flags = send_wr->send_flags;
	wqe_hdr = FIELD_PREP(
		ERDMA_SQE_HDR_CE_MASK,
		((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SE_MASK,
			      flags & IB_SEND_SOLICITED ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_FENCE_MASK,
			      flags & IB_SEND_FENCE ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_INLINE_MASK,
			      flags & IB_SEND_INLINE ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));

	switch (op) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		hw_op = ERDMA_OP_WRITE;
		if (op == IB_WR_RDMA_WRITE_WITH_IMM)
			hw_op = ERDMA_OP_WRITE_WITH_IMM;
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
		write_sqe = (struct erdma_write_sqe *)entry;

		write_sqe->imm_data = send_wr->ex.imm_data;
		write_sqe->sink_stag = cpu_to_le32(rdma_wr->rkey);
		write_sqe->sink_to_h =
			cpu_to_le32(upper_32_bits(rdma_wr->remote_addr));
		write_sqe->sink_to_l =
			cpu_to_le32(lower_32_bits(rdma_wr->remote_addr));

		length_field = &write_sqe->length;
		wqe_size = sizeof(struct erdma_write_sqe);
		sgl_offset = wqe_size;
		break;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		read_sqe = (struct erdma_readreq_sqe *)entry;
		if (unlikely(send_wr->num_sge != 1))
			return -EINVAL;
		hw_op = ERDMA_OP_READ;
		if (op == IB_WR_RDMA_READ_WITH_INV) {
			hw_op = ERDMA_OP_READ_WITH_INV;
			read_sqe->invalid_stag =
				cpu_to_le32(send_wr->ex.invalidate_rkey);
		}

		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
		read_sqe->length = cpu_to_le32(send_wr->sg_list[0].length);
		read_sqe->sink_stag = cpu_to_le32(send_wr->sg_list[0].lkey);
		read_sqe->sink_to_l =
			cpu_to_le32(lower_32_bits(send_wr->sg_list[0].addr));
		read_sqe->sink_to_h =
			cpu_to_le32(upper_32_bits(send_wr->sg_list[0].addr));

		sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
				      qp->attrs.sq_size, SQEBB_SHIFT);
		sge->addr = rdma_wr->remote_addr;
		sge->lkey = rdma_wr->rkey;
		sge->length = send_wr->sg_list[0].length;
		wqe_size = sizeof(struct erdma_readreq_sqe) +
			   send_wr->num_sge * sizeof(struct ib_sge);

		goto out;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND_WITH_INV:
		send_sqe = (struct erdma_send_sqe *)entry;
		hw_op = ERDMA_OP_SEND;
		if (op == IB_WR_SEND_WITH_IMM) {
			hw_op = ERDMA_OP_SEND_WITH_IMM;
			send_sqe->imm_data = send_wr->ex.imm_data;
		} else if (op == IB_WR_SEND_WITH_INV) {
			hw_op = ERDMA_OP_SEND_WITH_INV;
			send_sqe->invalid_stag =
				cpu_to_le32(send_wr->ex.invalidate_rkey);
		}
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		length_field = &send_sqe->length;
		wqe_size = sizeof(struct erdma_send_sqe);
		sgl_offset = wqe_size;

		break;
	case IB_WR_REG_MR:
		wqe_hdr |=
			FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, ERDMA_OP_REG_MR);
		regmr_sge = (struct erdma_reg_mr_sqe *)entry;
		mr = to_emr(reg_wr(send_wr)->mr);

		mr->access = ERDMA_MR_ACC_LR |
			     to_erdma_access_flags(reg_wr(send_wr)->access);
		regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
		regmr_sge->length = cpu_to_le32(mr->ibmr.length);
		regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
		attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
			FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
			FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
				   mr->mem.mtt_nents);

		if (mr->mem.mtt_nents <= ERDMA_MAX_INLINE_MTT_ENTRIES) {
			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
			/* Copy SGLs to SQE content to accelerate */
			memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
					       qp->attrs.sq_size, SQEBB_SHIFT),
			       mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
			wqe_size = sizeof(struct erdma_reg_mr_sqe) +
				   MTT_SIZE(mr->mem.mtt_nents);
		} else {
			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 1);
			wqe_size = sizeof(struct erdma_reg_mr_sqe);
		}

		regmr_sge->attrs = cpu_to_le32(attrs);
		goto out;
	case IB_WR_LOCAL_INV:
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
				      ERDMA_OP_LOCAL_INV);
		regmr_sge = (struct erdma_reg_mr_sqe *)entry;
		regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
		wqe_size = sizeof(struct erdma_reg_mr_sqe);
		goto out;
	default:
		return -EOPNOTSUPP;
	}

	if (flags & IB_SEND_INLINE) {
		ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
				       length_field);
		if (ret < 0)
			return -EINVAL;
		wqe_size += ret;
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK, ret);
	} else {
		ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
		if (ret)
			return -EINVAL;
		wqe_size += send_wr->num_sge * sizeof(struct ib_sge);
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK,
				      send_wr->num_sge);
	}

out:
	wqebb_cnt = SQEBB_COUNT(wqe_size);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_CNT_MASK, wqebb_cnt - 1);
	*pi += wqebb_cnt;
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, *pi);

	*entry = wqe_hdr;

	return 0;
}

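/*
 * Publish the new SQ producer index: mirror the doorbell value into the
 * sq_db_info buffer, then write it to the hardware doorbell register.
 */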
static void kick_sq_db(struct erdma_qp *qp, u16 pi)
{
	u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
		      FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);

	*(u64 *)qp->kern_qp.sq_db_info = db_data;
	writeq(db_data, qp->kern_qp.hw_sq_db);
}

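/*
 * Post a chain of send WRs. Processing stops at the first WR that cannot
 * be queued, and the failure is reported through bad_send_wr.
 */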
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
		    const struct ib_send_wr **bad_send_wr)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	int ret = 0;
	const struct ib_send_wr *wr = send_wr;
	unsigned long flags;
	u16 sq_pi;

	if (!send_wr)
		return -EINVAL;

	spin_lock_irqsave(&qp->lock, flags);
	sq_pi = qp->kern_qp.sq_pi;

	while (wr) {
		if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
			ret = -ENOMEM;
			*bad_send_wr = send_wr;
			break;
		}

		ret = erdma_push_one_sqe(qp, &sq_pi, wr);
		if (ret) {
			*bad_send_wr = wr;
			break;
		}
		qp->kern_qp.sq_pi = sq_pi;
		kick_sq_db(qp, sq_pi);

		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	return ret;
}

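/*
 * Build one RQE (at most one SGE is supported) and ring the RQ doorbell.
 */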
static int erdma_post_recv_one(struct erdma_qp *qp,
			       const struct ib_recv_wr *recv_wr)
{
	struct erdma_rqe *rqe =
		get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
				qp->attrs.rq_size, RQE_SHIFT);

	rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
	rqe->qpn = cpu_to_le32(QP_ID(qp));

	if (recv_wr->num_sge == 0) {
		rqe->length = 0;
	} else if (recv_wr->num_sge == 1) {
		rqe->stag = cpu_to_le32(recv_wr->sg_list[0].lkey);
		rqe->to = cpu_to_le64(recv_wr->sg_list[0].addr);
		rqe->length = cpu_to_le32(recv_wr->sg_list[0].length);
	} else {
		return -EINVAL;
	}

	*(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
	writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);

	qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
		recv_wr->wr_id;
	qp->kern_qp.rq_pi++;

	return 0;
}

int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
		    const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *wr = recv_wr;
	struct erdma_qp *qp = to_eqp(ibqp);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->lock, flags);

	while (wr) {
		ret = erdma_post_recv_one(qp, wr);
		if (ret) {
			*bad_recv_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->lock, flags);
	return ret;
}