/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4
#define IRDMA_FLUSH_DELAY_MS		20
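
/*
 * iWARP has no native P_Key support, so a single-entry table holding the
 * default "full membership" P_Key is what gets reported to the RDMA core.
 */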
#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF

struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;
	bool legacy_mode;
};
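
/*
 * Driver objects embed their ib_* core object and are recovered with
 * container_of(). A minimal sketch of the idiom (the helper name here is
 * illustrative; the driver's real conversion helpers live elsewhere):
 *
 *	static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
 *	{
 *		return container_of(ibucontext, struct irdma_ucontext, ibucontext);
 *	}
 */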

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
	struct hlist_node list;
	refcount_t refcnt;
	struct irdma_ah *parent_ah; /* AH from cached list */
};

struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};
	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};
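
/*
 * An irdma_mr backs either a memory region or a memory window, never both
 * at once, so the two core objects are overlaid in a union; @type records
 * how the memory was registered and which union member is live.
 */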

struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};
	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};
	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 suspend_pending : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};
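
/*
 * Teardown sketch (illustrative, not the driver's exact helpers): @refcnt
 * pairs with @free_qp so the destroy path can wait out late references:
 *
 *	if (refcount_dec_and_test(&iwqp->refcnt))
 *		complete(&iwqp->free_qp);
 *	...
 *	wait_for_completion(&iwqp->free_qp);
 */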

enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};
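
/*
 * A hedged sketch of how an mmap handler might honor @mmap_flag when
 * mapping the BAR page (NC = non-cached, WC = write-combined):
 *
 *	pgprot_t prot = entry->mmap_flag == IRDMA_MMAP_IO_WC ?
 *			pgprot_writecombine(vma->vm_page_prot) :
 *			pgprot_noncached(vma->vm_page_prot);
 */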

static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}
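
/*
 * Illustrative use of the two helpers above, e.g. when formatting a
 * firmware version string:
 *
 *	snprintf(str, len, "%u.%u", irdma_fw_major_ver(dev),
 *		 irdma_fw_minor_ver(dev));
 */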

static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}

static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/*
	 * iWARP does not support sendImm, so any completion carrying
	 * immediate data must have come from a WriteImm.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
					IB_WC_RECV_RDMA_WITH_IMM :
					IB_WC_RECV;
		return;
	}

	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}
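
/*
 * Poll-path sketch (illustrative; field and constant names assume the
 * driver's usual CQE queue-type encoding):
 *
 *	if (cur_cqe->q_type == IRDMA_CQE_QTYPE_SQ)
 *		set_ib_wc_op_sq(cur_cqe, entry);
 *	else
 *		set_ib_wc_op_rq(cur_cqe, entry, send_imm_ok);
 */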

void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */