/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
};

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} cq;

	struct {
		u64 id : 24;
		u64 wqe_cnt : 8;
		u64 tail_ptr : 32;
	} rq;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 32;
	} sq;

	struct {
		u64 id : 16;
		u64 reserved : 16;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} eq;
}; /* HW DATA */
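
/* Illustrative sketch, not part of the original header: composing the 64-bit
 * value written to the doorbell page when arming a CQ. The helper name and
 * parameters are hypothetical; the hardware layout is the union above.
 */
static inline u64 example_make_cq_doorbell(u32 cq_id, u32 tail_ptr, bool arm)
{
	union gdma_doorbell_entry e = {};

	e.cq.id = cq_id;
	e.cq.tail_ptr = tail_ptr;
	e.cq.arm = arm;

	return e.as_uint64;
}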

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
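
/* Illustrative sketch, not part of the original header: shows how a caller
 * might pair a request and its expected response before sending them over
 * the HWC. The helper name is hypothetical; only mana_gd_init_req_hdr() and
 * the general request/response structs above come from the header itself.
 */
static inline void example_init_general_hdrs(struct gdma_general_req *req,
					     struct gdma_general_resp *resp,
					     u32 code)
{
	/* Record both the request size and the size of the expected reply. */
	mana_gd_init_req_hdr(&req->hdr, code, sizeof(*req), sizeof(*resp));
}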

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 gdma_region;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
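
/* Illustrative sketch, not part of the original header: per the comment
 * above, 'head' and 'tail' both count 32-byte Basic Units for SQ/RQ, so
 * their difference is the number of BUs the HW has not yet consumed. The
 * helper name is hypothetical; real callers use the header's own
 * mana_gd_wq_avail_space(), declared further below.
 */
static inline u32 example_wq_used_bus(const struct gdma_queue *wq)
{
	return wq->head - wq->tail;
}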

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	void *arg;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_resource msix_resource;
	struct gdma_irq_context *irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	u32 db_page_size;
	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved :24;
	u32 last_vbytes :8;

	union {
		u32 flags;

		struct {
			u32 num_sge :8;
			u32 inline_oob_size_div4 :3;
			u32 client_oob_in_sgl :1;
			u32 reserved1 :4;
			u32 client_data_unit :14;
			u32 reserved2 :2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num : 24;
			u32 is_sq : 1;
			u32 reserved : 4;
			u32 owner_bits : 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1
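
/* Illustrative sketch, not part of the original header: the owner-bits field
 * in a CQE lets the driver tell whether the entry at 'head' was written
 * during the current pass over the ring (see the queue comment earlier). The
 * helper name and parameters are hypothetical.
 */
static inline bool example_cqe_is_new(const struct gdma_cqe *cqe, u32 head,
				      u32 num_cqe)
{
	/* The expected owner value advances once per full wrap of the ring. */
	u32 expected = (head / num_cqe) & GDMA_CQE_OWNER_MASK;

	return cqe->cqe_info.owner_bits == expected;
}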

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type : 8;
		u32 reserved1 : 8;
		u32 client_id : 2;
		u32 reserved2 : 11;
		u32 owner_bits : 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */
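
/* Illustrative sketch, not part of the original header: the last dword of a
 * gdma_eqe is decoded through union gdma_eqe_info to recover the event type
 * (one of enum gdma_eqe_type). The helper name is hypothetical.
 */
static inline u8 example_eqe_type(const struct gdma_eqe *eqe)
{
	union gdma_eqe_info info;

	info.as_uint32 = eqe->eqe_info;
	return info.type;
}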

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
#define GDMA_PF_REG_SHM_OFF 0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doorbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
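
/* Illustrative sketch, not part of the original header: because the request
 * ends in a flexible array of page addresses, its message size depends on
 * how many addresses are carried in this first message. The helper name is
 * hypothetical.
 */
static inline u32 example_create_dma_region_req_size(u32 page_addr_list_len)
{
	return sizeof(struct gdma_create_dma_region_req) +
	       page_addr_list_len * sizeof(u64);
}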

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_region;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 gdma_region;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 gdma_region;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);
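
/* Illustrative sketch, not part of the original header: a typical exchange
 * pairs mana_gd_init_req_hdr() with mana_gd_send_request(). The helper name
 * is hypothetical; GDMA_GENERATE_TEST_EQE and the structs used are declared
 * above, but the exact fields a real caller sets may differ.
 */
static inline int example_generate_test_eqe(struct gdma_context *gc,
					    struct gdma_dev *gd, u32 eq_id)
{
	struct gdma_generate_test_event_req req = {};
	struct gdma_general_resp resp = {};

	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
			     sizeof(req), sizeof(resp));
	req.hdr.dev_id = gd->dev_id;
	req.queue_index = eq_id;

	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
}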

#endif /* _GDMA_H */