erdma_verbs.h

/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Cheng Xu <[email protected]> */
/*          Kai Shen <[email protected]> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__

#include "erdma.h"

/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
#define ERDMA_MAX_SEND_WR 8192
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1
#define ERDMA_MAX_CONTEXT (128 * 1024)
#define ERDMA_MAX_SEND_SGE 6
#define ERDMA_MAX_RECV_SGE 1
#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
#define ERDMA_MAX_FRMR_PA 512

enum {
	ERDMA_MMAP_IO_NC = 0, /* no cache */
};

struct erdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};
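/*
 * Per-process user context.  sdb/rdb/cdb hold the SQ, RQ and CQ doorbell
 * addresses exported to userspace through the rdma_user_mmap_entry
 * handles below; dbrecords_page_list tracks the pinned doorbell-record
 * pages shared with user-space QPs and CQs.
 */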
struct erdma_ucontext {
	struct ib_ucontext ibucontext;

	u32 sdb_type;
	u32 sdb_idx;
	u32 sdb_page_idx;
	u32 sdb_page_off;
	u64 sdb;
	u64 rdb;
	u64 cdb;

	struct rdma_user_mmap_entry *sq_db_mmap_entry;
	struct rdma_user_mmap_entry *rq_db_mmap_entry;
	struct rdma_user_mmap_entry *cq_db_mmap_entry;

	/* doorbell records */
	struct list_head dbrecords_page_list;
	struct mutex dbrecords_page_mutex;
};

struct erdma_pd {
	struct ib_pd ibpd;
	u32 pdn;
};

/*
 * MemoryRegion definition.
 */
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
#define MTT_SIZE(mtt_cnt) (mtt_cnt << 3) /* per mtt takes 8 Bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8

#define ERDMA_MR_TYPE_NORMAL 0
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2

#define ERDMA_MR_INLINE_MTT 0
#define ERDMA_MR_INDIRECT_MTT 1

#define ERDMA_MR_ACC_LR BIT(0)
#define ERDMA_MR_ACC_LW BIT(1)
#define ERDMA_MR_ACC_RR BIT(2)
#define ERDMA_MR_ACC_RW BIT(3)
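/* Translate IB core access flags into the device MR access bits above. */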
static inline u8 to_erdma_access_flags(int access)
{
	return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
	       (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
	       (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0);
}
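/*
 * Describes a DMA-able memory range: the pinned umem plus the MTT
 * (memory translation table) the device walks to reach its pages.
 * Small mappings carry up to ERDMA_MAX_INLINE_MTT_ENTRIES entries
 * inline in mtt_entry[]; larger ones use an indirect MTT buffer
 * (mtt_buf, ERDMA_MR_INDIRECT_MTT).
 */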
struct erdma_mem {
	struct ib_umem *umem;
	void *mtt_buf;
	u32 mtt_type;
	u32 page_size;
	u32 page_offset;
	u32 page_cnt;
	u32 mtt_nents;

	u64 va;
	u64 len;

	u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
};

struct erdma_mr {
	struct ib_mr ibmr;
	struct erdma_mem mem;
	u8 type;
	u8 access;
	u8 valid;
};

struct erdma_user_dbrecords_page {
	struct list_head list;
	struct ib_umem *umem;
	u64 va;
	int refcnt;
};
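/*
 * User-space QP resources: the SQ/RQ buffers live in user memory and are
 * described by MTTs; doorbell records are carried in a shared
 * erdma_user_dbrecords_page.
 */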
struct erdma_uqp {
	struct erdma_mem sq_mtt;
	struct erdma_mem rq_mtt;

	dma_addr_t sq_db_info_dma_addr;
	dma_addr_t rq_db_info_dma_addr;

	struct erdma_user_dbrecords_page *user_dbr_page;

	u32 rq_offset;
};
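/*
 * Kernel QP resources: driver-owned, DMA-mapped queue buffers with
 * producer/consumer indices, per-entry wr_id tables (swr_tbl/rwr_tbl)
 * and ioremapped hardware doorbells.
 */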
struct erdma_kqp {
	u16 sq_pi;
	u16 sq_ci;

	u16 rq_pi;
	u16 rq_ci;

	u64 *swr_tbl;
	u64 *rwr_tbl;

	void __iomem *hw_sq_db;
	void __iomem *hw_rq_db;

	void *sq_buf;
	dma_addr_t sq_buf_dma_addr;

	void *rq_buf;
	dma_addr_t rq_buf_dma_addr;

	void *sq_db_info;
	void *rq_db_info;

	u8 sig_all;
};
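/* QP states follow the iWARP connection life cycle. */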
enum erdma_qp_state {
	ERDMA_QP_STATE_IDLE = 0,
	ERDMA_QP_STATE_RTR = 1,
	ERDMA_QP_STATE_RTS = 2,
	ERDMA_QP_STATE_CLOSING = 3,
	ERDMA_QP_STATE_TERMINATE = 4,
	ERDMA_QP_STATE_ERROR = 5,
	ERDMA_QP_STATE_UNDEF = 7,
	ERDMA_QP_STATE_COUNT = 8
};

enum erdma_qp_attr_mask {
	ERDMA_QP_ATTR_STATE = (1 << 0),
	ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
	ERDMA_QP_ATTR_ORD = (1 << 3),
	ERDMA_QP_ATTR_IRD = (1 << 4),
	ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
	ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
	ERDMA_QP_ATTR_MPA = (1 << 7)
};

struct erdma_qp_attrs {
	enum erdma_qp_state state;
	enum erdma_cc_alg cc; /* Congestion control algorithm */
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 cookie;
#define ERDMA_QP_ACTIVE 0
#define ERDMA_QP_PASSIVE 1
	u8 qp_type;
	u8 pd_len;
};
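/*
 * A QP embeds either kernel or user resources (union below).  Lifetime is
 * reference counted through 'ref'; teardown waits on 'safe_free' until the
 * last reference is dropped (see erdma_qp_get()/erdma_qp_put()).
 */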
struct erdma_qp {
	struct ib_qp ibqp;
	struct kref ref;
	struct completion safe_free;

	struct erdma_dev *dev;
	struct erdma_cep *cep;
	struct rw_semaphore state_lock;

	union {
		struct erdma_kqp kern_qp;
		struct erdma_uqp user_qp;
	};

	struct erdma_cq *scq;
	struct erdma_cq *rcq;

	struct erdma_qp_attrs attrs;
	spinlock_t lock;
};

struct erdma_kcq_info {
	void *qbuf;
	dma_addr_t qbuf_dma_addr;
	u32 ci;
	u32 cmdsn;
	u32 notify_cnt;

	spinlock_t lock;
	u8 __iomem *db;
	u64 *db_record;
};

struct erdma_ucq_info {
	struct erdma_mem qbuf_mtt;
	struct erdma_user_dbrecords_page *user_dbr_page;
	dma_addr_t db_info_dma_addr;
};

struct erdma_cq {
	struct ib_cq ibcq;
	u32 cqn;

	u32 depth;
	u32 assoc_eqn;

	union {
		struct erdma_kcq_info kern_cq;
		struct erdma_ucq_info user_cq;
	};
};

#define QP_ID(qp) ((qp)->ibqp.qp_num)
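/* QPs and CQs are tracked in per-device XArrays indexed by QPN/CQN. */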
static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
{
	return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
}

static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
{
	return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
}

void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
			     enum erdma_qp_attr_mask mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);
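/*
 * container_of() converters from the embedded IB core objects to the
 * erdma private structures, e.g. in a verbs handler:
 *
 *	struct erdma_qp *qp = to_eqp(ibqp);
 */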
static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
	return container_of(ibctx, struct erdma_ucontext, ibucontext);
}

static inline struct erdma_pd *to_epd(struct ib_pd *pd)
{
	return container_of(pd, struct erdma_pd, ibpd);
}

static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct erdma_mr, ibmr);
}

static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
{
	return container_of(qp, struct erdma_qp, ibqp);
}

static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct erdma_cq, ibcq);
}

static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
	return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
}
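/* Verbs entry points wired into the device's ib_device_ops. */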
int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
		       struct ib_udata *data);
int erdma_get_port_immutable(struct ib_device *dev, u32 port,
			     struct ib_port_immutable *ib_port_immutable);
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		    struct ib_udata *data);
int erdma_query_port(struct ib_device *dev, u32 port,
		     struct ib_port_attr *attr);
int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
		    union ib_gid *gid);
int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		    struct ib_udata *data);
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		   struct ib_qp_init_attr *init_attr);
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
		    struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				u64 virt, int access, struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void erdma_qp_get_ref(struct ib_qp *ibqp);
void erdma_qp_put_ref(struct ib_qp *ibqp);
struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
		    const struct ib_send_wr **bad_send_wr);
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
		    const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);

#endif