  1. /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
  2. /* Copyright (c) 2015 - 2021 Intel Corporation */
  3. #ifndef IRDMA_MAIN_H
  4. #define IRDMA_MAIN_H
  5. #include <linux/ip.h>
  6. #include <linux/tcp.h>
  7. #include <linux/if_vlan.h>
  8. #include <net/addrconf.h>
  9. #include <net/netevent.h>
  10. #include <net/tcp.h>
  11. #include <net/ip6_route.h>
  12. #include <net/flow.h>
  13. #include <net/secure_seq.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/etherdevice.h>
  16. #include <linux/inetdevice.h>
  17. #include <linux/spinlock.h>
  18. #include <linux/kernel.h>
  19. #include <linux/delay.h>
  20. #include <linux/pci.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/workqueue.h>
  23. #include <linux/slab.h>
  24. #include <linux/io.h>
  25. #include <linux/crc32c.h>
  26. #include <linux/kthread.h>
  27. #ifndef CONFIG_64BIT
  28. #include <linux/io-64-nonatomic-lo-hi.h>
  29. #endif
  30. #include <linux/auxiliary_bus.h>
  31. #include <linux/net/intel/iidc.h>
  32. #include <crypto/hash.h>
  33. #include <rdma/ib_smi.h>
  34. #include <rdma/ib_verbs.h>
  35. #include <rdma/ib_pack.h>
  36. #include <rdma/rdma_cm.h>
  37. #include <rdma/iw_cm.h>
  38. #include <rdma/ib_user_verbs.h>
  39. #include <rdma/ib_umem.h>
  40. #include <rdma/ib_cache.h>
  41. #include <rdma/uverbs_ioctl.h>
  42. #include "osdep.h"
  43. #include "defs.h"
  44. #include "hmc.h"
  45. #include "type.h"
  46. #include "ws.h"
  47. #include "protos.h"
  48. #include "pble.h"
  49. #include "cm.h"
  50. #include <rdma/irdma-abi.h>
  51. #include "verbs.h"
  52. #include "user.h"
  53. #include "puda.h"
  54. extern struct auxiliary_driver i40iw_auxiliary_drv;
  55. #define IRDMA_FW_VER_DEFAULT 2
  56. #define IRDMA_HW_VER 2
  57. #define IRDMA_ARP_ADD 1
  58. #define IRDMA_ARP_DELETE 2
  59. #define IRDMA_ARP_RESOLVE 3
  60. #define IRDMA_MACIP_ADD 1
  61. #define IRDMA_MACIP_DELETE 2
  62. #define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
  63. #define IW_CEQ_SIZE 2048
  64. #define IW_AEQ_SIZE 2048
  65. #define RX_BUF_SIZE (1536 + 8)
  66. #define IW_REG0_SIZE (4 * 1024)
  67. #define IW_TX_TIMEOUT (6 * HZ)
  68. #define IW_FIRST_QPN 1
  69. #define IW_SW_CONTEXT_ALIGN 1024
  70. #define MAX_DPC_ITERATIONS 128
  71. #define IRDMA_EVENT_TIMEOUT_MS 5000
  72. #define IRDMA_VCHNL_EVENT_TIMEOUT 100000
  73. #define IRDMA_RST_TIMEOUT_HZ 4
  74. #define IRDMA_NO_QSET 0xffff
  75. #define IW_CFG_FPM_QP_COUNT 32768
  76. #define IRDMA_MAX_PAGES_PER_FMR 262144
  77. #define IRDMA_MIN_PAGES_PER_FMR 1
  78. #define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
  79. #define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
  80. #define IRDMA_Q_TYPE_PE_AEQ 0x80
  81. #define IRDMA_Q_INVALID_IDX 0xffff
  82. #define IRDMA_REM_ENDPOINT_TRK_QPID 3
  83. #define IRDMA_DRV_OPT_ENA_MPA_VER_0 0x00000001
  84. #define IRDMA_DRV_OPT_DISABLE_MPA_CRC 0x00000002
  85. #define IRDMA_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
  86. #define IRDMA_DRV_OPT_DISABLE_INTF 0x00000008
  87. #define IRDMA_DRV_OPT_ENA_MSI 0x00000010
  88. #define IRDMA_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
  89. #define IRDMA_DRV_OPT_NO_INLINE_DATA 0x00000080
  90. #define IRDMA_DRV_OPT_DISABLE_INT_MOD 0x00000100
  91. #define IRDMA_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
  92. #define IRDMA_DRV_OPT_ENA_PAU 0x00000400
  93. #define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
  94. #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
  95. #define IRDMA_ROCE_CWND_DEFAULT 0x400
  96. #define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E
  97. #define IRDMA_FLUSH_SQ BIT(0)
  98. #define IRDMA_FLUSH_RQ BIT(1)
  99. #define IRDMA_REFLUSH BIT(2)
  100. #define IRDMA_FLUSH_WAIT BIT(3)
/* Ordered stages of driver initialization; used to track how far
 * bring-up progressed (see the "Last state of probe/open" markers).
 */
enum init_completion_state {
	INVALID_STATE = 0,
	INITIAL_STATE,
	CQP_CREATED,
	HMC_OBJS_CREATED,
	HW_RSRC_INITIALIZED,
	CCQ_CREATED,
	CEQ0_CREATED, /* Last state of probe */
	ILQ_CREATED,
	IEQ_CREATED,
	CEQS_CREATED,
	PBLE_CHUNK_MEM,
	AEQ_CREATED,
	IP_ADDR_REGISTERED, /* Last state of open */
};
/* QP/MR/CQ count limits; presumably indexed by rf->limits_sel — confirm */
struct irdma_rsrc_limits {
	u32 qplimit;
	u32 mrlimit;
	u32 cqlimit;
};

/* CQP major/minor error code pair with a human-readable description */
struct irdma_cqp_err_info {
	u16 maj;
	u16 min;
	const char *desc;
};

/* decoded result of one CQP completion */
struct irdma_cqp_compl_info {
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	bool error;
	u8 op_code;
};
/* One queued control-QP command.
 * refcnt governs lifetime (see irdma_put_cqp_request()); callers that set
 * 'waiting' sleep on waitq for completion. NOTE(review): 'dynamic'
 * presumably marks requests allocated outside the preallocated pool —
 * confirm against irdma_alloc_and_get_cqp_request().
 */
struct irdma_cqp_request {
	struct cqp_cmds_info info;
	wait_queue_head_t waitq;
	struct list_head list;
	refcount_t refcnt;
	void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
	void *param; /* opaque argument passed to callback_fcn */
	struct irdma_cqp_compl_info compl_info;
	bool request_done; /* READ/WRITE_ONCE macros operate on it */
	bool waiting:1;
	bool dynamic:1;
};

/* Control QP: HW queue, backing DMA memory and the request pool/lists */
struct irdma_cqp {
	struct irdma_sc_cqp sc_cqp;
	spinlock_t req_lock; /* protect CQP request list */
	spinlock_t compl_lock; /* protect CQP completion processing */
	wait_queue_head_t waitq;
	wait_queue_head_t remove_wq;
	struct irdma_dma_mem sq;
	struct irdma_dma_mem host_ctx;
	u64 *scratch_array;
	struct irdma_cqp_request *cqp_requests; /* backing array for the lists below */
	struct list_head cqp_avail_reqs;
	struct list_head cqp_pending_reqs;
};
/* CQP completion queue (CCQ) and its DMA backing */
struct irdma_ccq {
	struct irdma_sc_cq sc_cq;
	struct irdma_dma_mem mem_cq;
	struct irdma_dma_mem shadow_area;
};

/* One completion event queue with its interrupt and deferred-work state */
struct irdma_ceq {
	struct irdma_sc_ceq sc_ceq;
	struct irdma_dma_mem mem;
	u32 irq;
	u32 msix_idx;
	struct irdma_pci_f *rf; /* back-pointer to owning PCI function */
	struct tasklet_struct dpc_tasklet;
	spinlock_t ce_lock; /* sync cq destroy with cq completion event notification */
};

/* Asynchronous event queue; palloc is used when virtual_map is set */
struct irdma_aeq {
	struct irdma_sc_aeq sc_aeq;
	struct irdma_dma_mem mem;
	struct irdma_pble_alloc palloc;
	bool virtual_map;
};

/* One ARP cache slot: IP address (4 words, v4 or v6) and resolved MAC */
struct irdma_arp_entry {
	u32 ip_addr[4];
	u8 mac_addr[ETH_ALEN];
};

/* bookkeeping for one MSI-X vector */
struct irdma_msix_vector {
	u32 idx;
	u32 irq;
	u32 cpu_affinity;
	u32 ceq_id;
	cpumask_t mask;
};
/* software view of one multicast group, keyed by destination IP */
struct irdma_mc_table_info {
	u32 mgn;
	u32 dest_ip[4];
	bool lan_fwd:1;
	bool ipv4_valid:1;
};

/* node on rf->mc_qht_list pairing mcast info with its HW group context */
struct mc_table_list {
	struct list_head list;
	struct irdma_mc_table_info mc_info;
	struct irdma_mcast_grp_info mc_grp_ctx;
};

/* mapping of one MSI-X vector to its CEQ/AEQ indices */
struct irdma_qv_info {
	u32 v_idx; /* msix_vector */
	u16 ceq_idx;
	u16 aeq_idx;
	u8 itr_idx;
};

struct irdma_qvlist_info {
	u32 num_vectors;
	struct irdma_qv_info qv_info[]; /* num_vectors entries */
};

/* HW-generation specific callbacks */
struct irdma_gen_ops {
	void (*request_reset)(struct irdma_pci_f *rf);
	int (*register_qset)(struct irdma_sc_vsi *vsi,
			     struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};
/* Per-PCI-function driver state.
 * The allocated_* bitmaps, their max_* sizes and next_* round-robin hints
 * are consumed by irdma_alloc_rsrc()/irdma_free_rsrc() under rsrc_lock.
 */
struct irdma_pci_f {
	bool reset:1;
	bool rsrc_created:1;
	bool msix_shared:1;
	u8 rsrc_profile;
	u8 *hmc_info_mem;
	u8 *mem_rsrc;
	u8 rdma_ver;
	u8 rst_to;
	u8 pf_id;
	enum irdma_protocol_used protocol_used;
	u32 sd_type;
	u32 msix_count;
	/* resource maxima, allocation hints and usage counters */
	u32 max_mr;
	u32 max_qp;
	u32 max_cq;
	u32 max_ah;
	u32 next_ah;
	u32 max_mcg;
	u32 next_mcg;
	u32 max_pd;
	u32 next_qp;
	u32 next_cq;
	u32 next_pd;
	u32 max_mr_size;
	u32 max_cqe;
	u32 mr_stagmask;
	u32 used_pds;
	u32 used_cqs;
	u32 used_mrs;
	u32 used_qps;
	u32 arp_table_size;
	u32 next_arp_index;
	u32 ceqs_count;
	u32 next_ws_node_id;
	u32 max_ws_node_id;
	u32 limits_sel;
	/* resource bitmaps, guarded by rsrc_lock */
	unsigned long *allocated_ws_nodes;
	unsigned long *allocated_qps;
	unsigned long *allocated_cqs;
	unsigned long *allocated_mrs;
	unsigned long *allocated_pds;
	unsigned long *allocated_mcgs;
	unsigned long *allocated_ahs;
	unsigned long *allocated_arps;
	enum init_completion_state init_state;
	struct irdma_sc_dev sc_dev;
	struct pci_dev *pcidev;
	void *cdev;
	struct irdma_hw hw;
	struct irdma_cqp cqp;
	struct irdma_ccq ccq;
	struct irdma_aeq aeq;
	struct irdma_ceq *ceqlist; /* ceqs_count entries */
	struct irdma_hmc_pble_rsrc *pble_rsrc;
	struct irdma_arp_entry *arp_table;
	spinlock_t arp_lock; /*protect ARP table access*/
	spinlock_t rsrc_lock; /* protect HW resource array access */
	spinlock_t qptable_lock; /*protect QP table access*/
	struct irdma_qp **qp_table;
	spinlock_t qh_list_lock; /* protect mc_qht_list */
	struct mc_table_list mc_qht_list;
	struct irdma_msix_vector *iw_msixtbl;
	struct irdma_qvlist_info *iw_qvlist;
	struct tasklet_struct dpc_tasklet;
	struct msix_entry *msix_entries;
	struct irdma_dma_mem obj_mem;
	struct irdma_dma_mem obj_next;
	atomic_t vchnl_msgs;
	wait_queue_head_t vchnl_waitq;
	struct workqueue_struct *cqp_cmpl_wq; /* runs cqp_cmpl_work */
	struct work_struct cqp_cmpl_work; /* see cqp_compl_worker() */
	struct irdma_sc_vsi default_vsi;
	void *back_fcn;
	struct irdma_gen_ops gen_ops;
	struct irdma_device *iwdev;
};
/* Per-ib_device state; embeds the RDMA core device (see to_iwdev()). */
struct irdma_device {
	struct ib_device ibdev;
	struct irdma_pci_f *rf; /* owning PCI function */
	struct net_device *netdev;
	struct workqueue_struct *cleanup_wq;
	struct irdma_sc_vsi vsi;
	struct irdma_cm_core cm_core;
	DECLARE_HASHTABLE(ah_hash_tbl, 8);
	struct mutex ah_tbl_lock; /* protect AH hash table access */
	u32 roce_cwnd;
	u32 roce_ackcreds;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 push_mode;
	u32 rcv_wnd;
	u16 mac_ip_table_idx;
	u16 vsi_num;
	u8 rcv_wscale;
	u8 iw_status;
	bool roce_mode:1;
	bool roce_dcqcn_en:1;
	bool dcb_vlan_mode:1;
	bool iw_ooo:1;
	enum init_completion_state init_state;
	wait_queue_head_t suspend_wq;
};
/* container_of() converters: map RDMA core objects to the irdma private
 * structs that embed them.
 */
static inline struct irdma_device *to_iwdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct irdma_device, ibdev);
}

static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct irdma_ucontext, ibucontext);
}

static inline struct irdma_user_mmap_entry *
to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct irdma_user_mmap_entry,
			    rdma_entry);
}

static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct irdma_pd, ibpd);
}

static inline struct irdma_ah *to_iwah(struct ib_ah *ibah)
{
	return container_of(ibah, struct irdma_ah, ibah);
}

static inline struct irdma_mr *to_iwmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct irdma_mr, ibmr);
}

/* MWs are carried inside struct irdma_mr (ibmw is a member of irdma_mr) */
static inline struct irdma_mr *to_iwmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct irdma_mr, ibmw);
}

static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct irdma_cq, ibcq);
}

static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct irdma_qp, ibqp);
}

/* device-core back-pointer: sc_dev is embedded in irdma_pci_f */
static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
{
	return container_of(dev, struct irdma_pci_f, sc_dev);
}
  362. /**
  363. * irdma_alloc_resource - allocate a resource
  364. * @iwdev: device pointer
  365. * @resource_array: resource bit array:
  366. * @max_resources: maximum resource number
  367. * @req_resources_num: Allocated resource number
  368. * @next: next free id
  369. **/
  370. static inline int irdma_alloc_rsrc(struct irdma_pci_f *rf,
  371. unsigned long *rsrc_array, u32 max_rsrc,
  372. u32 *req_rsrc_num, u32 *next)
  373. {
  374. u32 rsrc_num;
  375. unsigned long flags;
  376. spin_lock_irqsave(&rf->rsrc_lock, flags);
  377. rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next);
  378. if (rsrc_num >= max_rsrc) {
  379. rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc);
  380. if (rsrc_num >= max_rsrc) {
  381. spin_unlock_irqrestore(&rf->rsrc_lock, flags);
  382. ibdev_dbg(&rf->iwdev->ibdev,
  383. "ERR: resource [%d] allocation failed\n",
  384. rsrc_num);
  385. return -EOVERFLOW;
  386. }
  387. }
  388. __set_bit(rsrc_num, rsrc_array);
  389. *next = rsrc_num + 1;
  390. if (*next == max_rsrc)
  391. *next = 0;
  392. *req_rsrc_num = rsrc_num;
  393. spin_unlock_irqrestore(&rf->rsrc_lock, flags);
  394. return 0;
  395. }
/**
 * irdma_free_rsrc - free a resource id back to its bitmap
 * @rf: RDMA PCI function
 * @rsrc_array: resource bitmap the id was allocated from
 * @rsrc_num: resource id to free
 **/
static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
				   unsigned long *rsrc_array, u32 rsrc_num)
{
	unsigned long flags;

	spin_lock_irqsave(&rf->rsrc_lock, flags);
	__clear_bit(rsrc_num, rsrc_array);
	spin_unlock_irqrestore(&rf->rsrc_lock, flags);
}
/* HW init/deinit */
int irdma_ctrl_init_hw(struct irdma_pci_f *rf);
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
int irdma_rt_init_hw(struct irdma_device *iwdev,
		     struct irdma_l2params *l2params);
void irdma_rt_deinit_hw(struct irdma_device *iwdev);

/* QP reference counting, lookup and flush */
void irdma_qp_add_ref(struct ib_qp *ibqp);
void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);

/* ARP / APBVT / MAC table management */
void irdma_manage_arp_cache(struct irdma_pci_f *rf,
			    const unsigned char *mac_addr,
			    u32 *ip_addr, bool ipv4, u32 action);
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
void irdma_del_apbvt(struct irdma_device *iwdev,
		     struct irdma_apbvt_entry *entry);

/* CQP request lifecycle and submission */
struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
							  bool wait);
void irdma_free_cqp_request(struct irdma_cqp *cqp,
			    struct irdma_cqp_request *cqp_request);
void irdma_put_cqp_request(struct irdma_cqp *cqp,
			   struct irdma_cqp_request *cqp_request);
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
void irdma_port_ibevent(struct irdma_device *iwdev);
void irdma_cm_disconn(struct irdma_qp *qp);
bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
			u16 maj_err_code, u16 min_err_code);
int irdma_handle_cqp_op(struct irdma_pci_f *rf,
			struct irdma_cqp_request *cqp_request);

/* QP modify entry points (iWARP and RoCE variants) */
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata);
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);

void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
		       struct irdma_modify_qp_info *info, bool wait);
int irdma_qp_suspend_resume(struct irdma_sc_qp *qp, bool suspend);
int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
		       enum irdma_quad_entry_type etype,
		       enum irdma_quad_hash_manage_type mtype, void *cmnode,
		       bool wait);
void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
void irdma_free_qp_rsrc(struct irdma_qp *iwqp);

/* connection management */
int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
			 u8 term_len);
int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack);
int irdma_send_reset(struct irdma_cm_node *cm_node);
struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
				      u16 rem_port, u32 *rem_addr, u16 loc_port,
				      u32 *loc_addr, u16 vlan_id);
int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
			struct irdma_qp_flush_info *info, bool wait);
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
		  struct irdma_gen_ae_info *info, bool wait);

/* IP address byte-order/VLAN helpers */
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(u32 *addr);
struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
				int acc, u64 *iova_start);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
		    bool wait,
		    void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
		    void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
bool irdma_cq_empty(struct irdma_cq *iwcq);

/* inet/netdev notifier callbacks */
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
			 void *ptr);
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
			  void *ptr);
int irdma_net_event(struct notifier_block *notifier, unsigned long event,
		    void *ptr);
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
			  void *ptr);
void irdma_add_ip(struct irdma_device *iwdev);
void cqp_compl_worker(struct work_struct *work);
#endif /* IRDMA_MAIN_H */