wr.h

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

enum {
        MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

struct mlx5_wqe_eth_pad {
        u8 rsvd0[16];
};

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first address following the end of the
 * fragment or the SQ. Accordingly, during WQE construction, which
 * repeatedly advances the pointer used to write the next piece of data,
 * the writer simply has to check whether it has reached an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *        The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
        void *fragment_end;

        fragment_end = mlx5_frag_buf_get_wqe
                (&sq->fbc,
                 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

        return fragment_end + MLX5_SEND_WQE_BB;
}
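
/*
 * Usage sketch (illustrative only, not part of this header's API): before a
 * WQE is built, the poster typically resolves both the WQE start and its
 * edge, so the segment writers know when to wrap to the next fragment:
 *
 *        idx  = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
 *        seg  = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
 *        edge = get_sq_edge(&qp->sq, idx);
 */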

/* handle_post_send_edge - Check if we reached the SQ edge. If so, update to
 * the next nearby edge and get a new address translation for the current WQE
 * position.
 * @sq: SQ buffer.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
                                         u32 wqe_sz, void **cur_edge)
{
        u32 idx;

        if (likely(*seg != *cur_edge))
                return;

        idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
        *cur_edge = get_sq_edge(sq, idx);

        *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}

/* mlx5r_memcpy_send_wqe - copy data from src to the WQE and update the
 * relevant WQ's pointers. At the end @seg is aligned to 16B regardless of the
 * copied size.
 * @sq: SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
                                         void **seg, u32 *wqe_sz,
                                         const void *src, size_t n)
{
        while (likely(n)) {
                size_t leftlen = *cur_edge - *seg;
                size_t copysz = min_t(size_t, leftlen, n);
                size_t stride;

                memcpy(*seg, src, copysz);

                n -= copysz;
                src += copysz;
                stride = !n ? ALIGN(copysz, 16) : copysz;
                *seg += stride;
                *wqe_sz += stride >> 4;
                handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
        }
}
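
/*
 * Usage sketch (illustrative only): copying an arbitrary buffer, e.g. inline
 * MR descriptors, into the WQE. The helper wraps across fragment/SQ edges and
 * leaves *seg 16B-aligned; "data" and "len" are placeholders for whatever the
 * caller wants copied:
 *
 *        mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &wqe_sz, data, len);
 */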

int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                    struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
                    int *size, void **cur_edge, int nreq, __be32 general_id,
                    bool send_signaled, bool solicited);
void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
                      void *seg, u8 size, void *cur_edge, unsigned int idx,
                      u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
                   struct mlx5_wqe_ctrl_seg *ctrl);
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr, bool drain);

static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
                                            const struct ib_send_wr *wr,
                                            const struct ib_send_wr **bad_wr)
{
        return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
                                          const struct ib_send_wr *wr,
                                          const struct ib_send_wr **bad_wr)
{
        return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
                                            const struct ib_recv_wr *wr,
                                            const struct ib_recv_wr **bad_wr)
{
        return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
                                          const struct ib_recv_wr *wr,
                                          const struct ib_recv_wr **bad_wr)
{
        return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
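
/*
 * Note (sketch, based on how these wrappers are typically consumed): the
 * _nodrain variants are what gets wired into the device's ib_device_ops
 * entry points, while the _drain variants are reserved for the QP drain
 * path, e.g.:
 *
 *        .post_send = mlx5_ib_post_send_nodrain,
 *        .post_recv = mlx5_ib_post_recv_nodrain,
 */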

#endif /* _MLX5_IB_WR_H */