rc.h

/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2018 Intel Corporation.
 *
 */
#ifndef HFI1_RC_H
#define HFI1_RC_H

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
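
/*
 * Advance the responder's ACK queue past entry @n: bump both
 * s_tail_ack_queue and s_acked_ack_queue to the following slot,
 * wrapping to 0 once the index passes the queue size reported by
 * rvt_size_atomic(), and reset s_ack_state to a bare ACKNOWLEDGE.
 */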
static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
{
	unsigned int next;

	next = n + 1;
	if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_acked_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}
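
/*
 * Defer a NAK response for @qp: if the QP is not already queued on the
 * receive context's wait list, flag it as owing a NAK response, take a
 * QP reference, and add it to rcd->qp_wait_list so the response is
 * sent later.
 */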
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
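
/*
 * Reposition @ss within @wqe for a restart at @psn: the PSN delta from
 * the WQE's first PSN, at one MTU per packet, gives the byte offset to
 * skip; rvt_restart_sge() rewinds the SGE state to that offset and
 * returns the length remaining.
 */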
static inline u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
			      u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	return rvt_restart_sge(ss, wqe, len);
}
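
/*
 * Drop the MR reference held in an ACK queue entry's RDMA SGE, if any,
 * and clear the pointer so the reference cannot be dropped twice.
 */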
static inline void release_rdma_sge_mr(struct rvt_ack_entry *e)
{
	if (e->rdma_sge.mr) {
		rvt_put_mr(e->rdma_sge.mr);
		e->rdma_sge.mr = NULL;
	}
}
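
/* Shared RC protocol helpers (defined in rc.c). */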
struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
				      u8 *prev_ack, bool *scheduled);
int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val,
	      struct hfi1_ctxtdata *rcd);
struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe,
				  struct hfi1_ibport *ibp);

#endif /* HFI1_RC_H */