// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <[email protected]> */
/* Kai Shen <[email protected]> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"
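
/*
 * Return the CQE at the current consumer index if the device has written
 * it, or NULL if the slot is still stale. Validity is tracked by an owner
 * bit in the CQE header whose expected polarity flips on every pass
 * through the (power-of-two sized) queue; "ci & depth" extracts the bit
 * that tells how many times the consumer index has wrapped, modulo two.
 */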
static void *get_next_valid_cqe(struct erdma_cq *cq)
{
	__be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
				      cq->depth, CQE_SHIFT);
	u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
			      __be32_to_cpu(READ_ONCE(*cqe)));

	return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
}
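
/*
 * Arm the CQ: pack the notify count, CQN, arm and solicited-only flags,
 * command sequence number and current CI into a single 64-bit doorbell
 * value, mirror it in the host-memory doorbell record, then ring the
 * hardware doorbell. Called with kern_cq.lock held.
 */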
static void notify_cq(struct erdma_cq *cq, u8 solicited)
{
	u64 db_data =
		FIELD_PREP(ERDMA_CQDB_IDX_MASK, cq->kern_cq.notify_cnt) |
		FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
		FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
		FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
		FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
		FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);

	*cq->kern_cq.db_record = db_data;
	writeq(db_data, cq->kern_cq.db);
}
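
/*
 * ib_req_notify_cq() entry point: request a completion event on this CQ,
 * for solicited completions only if IB_CQ_SOLICITED was passed. With
 * IB_CQ_REPORT_MISSED_EVENTS, return 1 when a CQE is already pending so
 * the caller knows it should poll again before sleeping.
 */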
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);

	notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
		ret = 1;

	cq->kern_cq.notify_cnt++;

	spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);

	return ret;
}
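
/* Map device CQE opcodes to the generic ib_wc_opcode values. */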
static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
	[ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
	[ERDMA_OP_READ] = IB_WC_RDMA_READ,
	[ERDMA_OP_SEND] = IB_WC_SEND,
	[ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
	[ERDMA_OP_RECEIVE] = IB_WC_RECV,
	[ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
	[ERDMA_OP_RECV_INV] = IB_WC_RECV,
	[ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
	[ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
	[ERDMA_OP_REG_MR] = IB_WC_REG_MR,
	[ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
	[ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
};
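
/*
 * Per-syndrome completion status table, indexed by enum erdma_wc_status:
 * each entry carries the matching generic ib_wc_status plus a
 * vendor-specific error code (the .erdma field documents the slot).
 */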
static const struct {
	enum erdma_wc_status erdma;
	enum ib_wc_status base;
	enum erdma_vendor_err vendor;
} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
	{ ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
	  ERDMA_WC_VENDOR_INVALID_RQE },
	{ ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_INVALID_STAG },
	{ ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
	{ ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
	{ ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_INVALID_PD },
	{ ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
	  ERDMA_WC_VENDOR_RQE_WRAP_ERR },
	{ ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
	  ERDMA_WC_VENDOR_INVALID_SQE },
	{ ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
	  ERDMA_WC_VENDOR_ZERO_ORD },
	{ ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_INVALID_STAG },
	{ ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
	{ ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
	{ ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_INVALID_PD },
	{ ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
	  ERDMA_WC_VENDOR_SQE_WARP_ERR },
	{ ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
	{ ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};

#define ERDMA_POLLCQ_NO_QP 1
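
/*
 * Poll a single CQE and translate it into *wc. Returns 0 on success,
 * -EAGAIN when no new CQE is available, or ERDMA_POLLCQ_NO_QP when the
 * CQE refers to a QP that no longer exists and should just be skipped.
 */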
static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
{
	struct erdma_dev *dev = to_edev(cq->ibcq.device);
	u8 opcode, syndrome, qtype;
	struct erdma_kqp *kern_qp;
	struct erdma_cqe *cqe;
	struct erdma_qp *qp;
	u16 wqe_idx, depth;
	u32 qpn, cqe_hdr;
	u64 *id_table;
	u64 *wqe_hdr;

	cqe = get_next_valid_cqe(cq);
	if (!cqe)
		return -EAGAIN;

	cq->kern_cq.ci++;

	/* cqbuf should be ready when we poll */
	dma_rmb();

	qpn = be32_to_cpu(cqe->qpn);
	wqe_idx = be32_to_cpu(cqe->qe_idx);
	cqe_hdr = be32_to_cpu(cqe->hdr);

	qp = find_qp_by_qpn(dev, qpn);
	if (!qp)
		return ERDMA_POLLCQ_NO_QP;

	kern_qp = &qp->kern_qp;

	qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
	syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
	opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);
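
	/*
	 * Send-queue completions also advance sq_ci past every WQEBB of
	 * the completed WQE; receive-queue completions only need the
	 * wr_id lookup table.
	 */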
	if (qtype == ERDMA_CQE_QTYPE_SQ) {
		id_table = kern_qp->swr_tbl;
		depth = qp->attrs.sq_size;
		wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
					  qp->attrs.sq_size, SQEBB_SHIFT);
		kern_qp->sq_ci =
			FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
			wqe_idx + 1;
	} else {
		id_table = kern_qp->rwr_tbl;
		depth = qp->attrs.rq_size;
	}

	wc->wr_id = id_table[wqe_idx & (depth - 1)];
	wc->byte_len = be32_to_cpu(cqe->size);

	wc->wc_flags = 0;
	wc->opcode = wc_mapping_table[opcode];
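
	/*
	 * The device reports imm_data little-endian, while ib_wc carries
	 * the raw network-order value; convert accordingly.
	 */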
	if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
		wc->wc_flags |= IB_WC_WITH_IMM;
	} else if (opcode == ERDMA_OP_RECV_INV) {
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
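
	/* Clamp unknown syndromes to the generic-error table entry. */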
	if (syndrome >= ERDMA_NUM_WC_STATUS)
		syndrome = ERDMA_WC_GENERAL_ERR;

	wc->status = map_cqe_status[syndrome].base;
	wc->vendor_err = map_cqe_status[syndrome].vendor;
	wc->qp = &qp->ibqp;

	return 0;
}
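
/*
 * ib_poll_cq() entry point: reap up to num_entries completions into wc[],
 * returning the number reaped. CQEs whose QP has been destroyed are
 * skipped rather than reported.
 */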
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	unsigned long flags;
	int npolled, ret;

	spin_lock_irqsave(&cq->kern_cq.lock, flags);

	for (npolled = 0; npolled < num_entries;) {
		ret = erdma_poll_one_cqe(cq, wc + npolled);

		if (ret == -EAGAIN) /* no new CQEs received. */
			break;
		else if (ret) /* ignore invalid CQEs. */
			continue;

		npolled++;
	}

	spin_unlock_irqrestore(&cq->kern_cq.lock, flags);

	return npolled;
}