/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_H__
#define __PVRDMA_H__

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>

#include "pvrdma_ring.h"
#include "pvrdma_dev_api.h"
#include "pvrdma_verbs.h"

/* NOT the same as BIT_MASK(). */
#define PVRDMA_MASK(n) ((n << 1) - 1)
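
/*
 * For illustration: given the highest flag value in a set, PVRDMA_MASK()
 * yields a mask covering that bit and every lower one, e.g.
 * PVRDMA_MASK(0x8) == 0xf. It is used below to drop flag bits that the
 * device does not define.
 */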

/*
 * VMware PVRDMA PCI device id.
 */
#define PCI_DEVICE_ID_VMWARE_PVRDMA	0x0820

#define PVRDMA_NUM_RING_PAGES		4
#define PVRDMA_QP_NUM_HEADER_PAGES	1

struct pvrdma_dev;
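
/*
 * Rough sketch of the layout, as suggested by the field names and the
 * page-directory helpers declared at the end of this header: "dir" is a
 * single DMA-able page holding the DMA addresses of the second-level
 * "tables", each table entry holds the DMA address of one data page, and
 * "pages" optionally keeps kernel mappings of those data pages for
 * callers that need CPU access.
 */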
struct pvrdma_page_dir {
	dma_addr_t dir_dma;
	u64 *dir;
	int ntables;
	u64 **tables;
	u64 npages;
	void **pages;
};

struct pvrdma_cq {
	struct ib_cq ibcq;
	int offset;
	spinlock_t cq_lock; /* Poll lock. */
	struct pvrdma_uar_map *uar;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring_state;
	struct pvrdma_page_dir pdir;
	u32 cq_handle;
	bool is_kernel;
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_id_table {
	u32 last;
	u32 top;
	u32 max;
	u32 mask;
	spinlock_t lock; /* Table lock. */
	unsigned long *table;
};

struct pvrdma_uar_map {
	unsigned long pfn;
	void __iomem *map;
	int index;
};

struct pvrdma_uar_table {
	struct pvrdma_id_table tbl;
	int size;
};

struct pvrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct pvrdma_dev *dev;
	struct pvrdma_uar_map uar;
	u64 ctx_handle;
};

struct pvrdma_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u32 pd_handle;
	int privileged;
};

struct pvrdma_mr {
	u32 mr_handle;
	u64 iova;
	u64 size;
};

struct pvrdma_user_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct pvrdma_mr mmr;
	struct pvrdma_page_dir pdir;
	u64 *pages;
	u32 npages;
	u32 max_pages;
	u32 page_shift;
};

struct pvrdma_wq {
	struct pvrdma_ring *ring;
	spinlock_t lock; /* Work queue lock. */
	int wqe_cnt;
	int wqe_size;
	int max_sg;
	int offset;
};

struct pvrdma_ah {
	struct ib_ah ibah;
	struct pvrdma_av av;
};

struct pvrdma_srq {
	struct ib_srq ibsrq;
	int offset;
	spinlock_t lock; /* SRQ lock. */
	int wqe_cnt;
	int wqe_size;
	int max_gs;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring;
	struct pvrdma_page_dir pdir;
	u32 srq_handle;
	int npages;
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_qp {
	struct ib_qp ibqp;
	u32 qp_handle;
	u32 qkey;
	struct pvrdma_wq sq;
	struct pvrdma_wq rq;
	struct ib_umem *rumem;
	struct ib_umem *sumem;
	struct pvrdma_page_dir pdir;
	struct pvrdma_srq *srq;
	int npages;
	int npages_send;
	int npages_recv;
	u32 flags;
	u8 port;
	u8 state;
	bool is_kernel;
	struct mutex mutex; /* QP state mutex. */
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_dev {
	/* PCI device-related information. */
	struct ib_device ib_dev;
	struct pci_dev *pdev;
	void __iomem *regs;
	struct pvrdma_device_shared_region *dsr; /* Shared region pointer */
	dma_addr_t dsrbase; /* Shared region base address */
	void *cmd_slot;
	void *resp_slot;
	unsigned long flags;
	struct list_head device_link;
	unsigned int dsr_version;

	/* Locking and interrupt information. */
	spinlock_t cmd_lock; /* Command lock. */
	struct semaphore cmd_sema;
	struct completion cmd_done;
	unsigned int nr_vectors;

	/* RDMA-related device information. */
	union ib_gid *sgid_tbl;
	struct pvrdma_ring_state *async_ring_state;
	struct pvrdma_page_dir async_pdir;
	struct pvrdma_ring_state *cq_ring_state;
	struct pvrdma_page_dir cq_pdir;
	struct pvrdma_cq **cq_tbl;
	spinlock_t cq_tbl_lock;
	struct pvrdma_srq **srq_tbl;
	spinlock_t srq_tbl_lock;
	struct pvrdma_qp **qp_tbl;
	spinlock_t qp_tbl_lock;
	struct pvrdma_uar_table uar_table;
	struct pvrdma_uar_map driver_uar;
	__be64 sys_image_guid;
	spinlock_t desc_lock; /* Device modification lock. */
	u32 port_cap_mask;
	struct mutex port_mutex; /* Port modification mutex. */
	bool ib_active;
	atomic_t num_qps;
	atomic_t num_cqs;
	atomic_t num_srqs;
	atomic_t num_pds;
	atomic_t num_ahs;

	/* Network device information. */
	struct net_device *netdev;
	struct notifier_block nb_netdev;
};

struct pvrdma_netdevice_work {
	struct work_struct work;
	struct net_device *event_netdev;
	unsigned long event;
};

static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct pvrdma_dev, ib_dev);
}

static inline struct pvrdma_ucontext *
to_vucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
}

static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct pvrdma_pd, ibpd);
}

static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct pvrdma_cq, ibcq);
}

static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct pvrdma_srq, ibsrq);
}

static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct pvrdma_user_mr, ibmr);
}

static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct pvrdma_qp, ibqp);
}

static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
{
	return container_of(ibah, struct pvrdma_ah, ibah);
}
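
/*
 * For illustration: each wrapper above recovers the driver-private object
 * from the ib_* core object embedded in it, so a verbs callback handed a
 * struct ib_cq * can simply do
 *
 *	struct pvrdma_cq *cq = to_vcq(ibcq);
 *
 * and reach the driver's ring state, page directory, and so on.
 */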

static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
{
	writel(cpu_to_le32(val), dev->regs + reg);
}

static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
{
	return le32_to_cpu(readl(dev->regs + reg));
}

static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
}

static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
}

static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
					    u64 offset)
{
	return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
}
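
/*
 * For illustration: "offset" is a byte offset into the page directory's
 * flat list of data pages, so
 *
 *	pvrdma_page_dir_get_ptr(pdir, n * PAGE_SIZE)
 *
 * returns the kernel virtual address of the n-th page. This relies on
 * pdir->pages[] being populated (cf. the alloc_pages argument to
 * pvrdma_page_dir_init() declared below).
 */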

static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
{
	return (enum pvrdma_mtu)mtu;
}

static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
{
	return (enum ib_mtu)mtu;
}

static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
					enum ib_port_state state)
{
	return (enum pvrdma_port_state)state;
}

static inline enum ib_port_state pvrdma_port_state_to_ib(
					enum pvrdma_port_state state)
{
	return (enum ib_port_state)state;
}

static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
	return flags;
}

static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
					enum ib_port_width width)
{
	return (enum pvrdma_port_width)width;
}

static inline enum ib_port_width pvrdma_port_width_to_ib(
					enum pvrdma_port_width width)
{
	return (enum ib_port_width)width;
}

static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
					enum ib_port_speed speed)
{
	return (enum pvrdma_port_speed)speed;
}

static inline enum ib_port_speed pvrdma_port_speed_to_ib(
					enum pvrdma_port_speed speed)
{
	return (enum ib_port_speed)speed;
}

static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
	return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
}

static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
					enum ib_mig_state state)
{
	return (enum pvrdma_mig_state)state;
}

static inline enum ib_mig_state pvrdma_mig_state_to_ib(
					enum pvrdma_mig_state state)
{
	return (enum ib_mig_state)state;
}

static inline int ib_access_flags_to_pvrdma(int flags)
{
	return flags;
}

static inline int pvrdma_access_flags_to_ib(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
}

static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
{
	return (enum pvrdma_qp_type)type;
}

static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
	return (enum pvrdma_qp_state)state;
}

static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
{
	return (enum ib_qp_state)state;
}

static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
	switch (op) {
	case IB_WR_RDMA_WRITE:
		return PVRDMA_WR_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
	case IB_WR_SEND:
		return PVRDMA_WR_SEND;
	case IB_WR_SEND_WITH_IMM:
		return PVRDMA_WR_SEND_WITH_IMM;
	case IB_WR_RDMA_READ:
		return PVRDMA_WR_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
	case IB_WR_LSO:
		return PVRDMA_WR_LSO;
	case IB_WR_SEND_WITH_INV:
		return PVRDMA_WR_SEND_WITH_INV;
	case IB_WR_RDMA_READ_WITH_INV:
		return PVRDMA_WR_RDMA_READ_WITH_INV;
	case IB_WR_LOCAL_INV:
		return PVRDMA_WR_LOCAL_INV;
	case IB_WR_REG_MR:
		return PVRDMA_WR_FAST_REG_MR;
	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
	case IB_WR_REG_MR_INTEGRITY:
		return PVRDMA_WR_REG_SIG_MR;
	default:
		return PVRDMA_WR_ERROR;
	}
}
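
/*
 * Note that, unlike the simple casts above, work-request opcodes are
 * translated explicitly; anything the device does not understand
 * collapses to PVRDMA_WR_ERROR so that the post path can reject the
 * work request.
 */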

static inline enum ib_wc_status pvrdma_wc_status_to_ib(
					enum pvrdma_wc_status status)
{
	return (enum ib_wc_status)status;
}

static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
{
	switch (opcode) {
	case PVRDMA_WC_SEND:
		return IB_WC_SEND;
	case PVRDMA_WC_RDMA_WRITE:
		return IB_WC_RDMA_WRITE;
	case PVRDMA_WC_RDMA_READ:
		return IB_WC_RDMA_READ;
	case PVRDMA_WC_COMP_SWAP:
		return IB_WC_COMP_SWAP;
	case PVRDMA_WC_FETCH_ADD:
		return IB_WC_FETCH_ADD;
	case PVRDMA_WC_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	case PVRDMA_WC_FAST_REG_MR:
		return IB_WC_REG_MR;
	case PVRDMA_WC_MASKED_COMP_SWAP:
		return IB_WC_MASKED_COMP_SWAP;
	case PVRDMA_WC_MASKED_FETCH_ADD:
		return IB_WC_MASKED_FETCH_ADD;
	case PVRDMA_WC_RECV:
		return IB_WC_RECV;
	case PVRDMA_WC_RECV_RDMA_WITH_IMM:
		return IB_WC_RECV_RDMA_WITH_IMM;
	default:
		return IB_WC_SEND;
	}
}

static inline int pvrdma_wc_flags_to_ib(int flags)
{
	return flags;
}

static inline int ib_send_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}

static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
{
	switch (type) {
	case PVRDMA_NETWORK_ROCE_V1:
		return RDMA_NETWORK_ROCE_V1;
	case PVRDMA_NETWORK_IPV4:
		return RDMA_NETWORK_IPV4;
	case PVRDMA_NETWORK_IPV6:
		return RDMA_NETWORK_IPV6;
	default:
		return RDMA_NETWORK_IPV6;
	}
}

void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
			 const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
			 const struct ib_qp_cap *src);
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
			       const struct pvrdma_global_route *src);
void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
			       const struct ib_global_route *src);
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
			    const struct pvrdma_ah_attr *src);
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
			    const struct rdma_ah_attr *src);
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);

int pvrdma_uar_table_init(struct pvrdma_dev *dev);
void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);

int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);

void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);

int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
			 u64 npages, bool alloc_pages);
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
			     struct pvrdma_page_dir *pdir);
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
			       dma_addr_t daddr);
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
				struct ib_umem *umem, u64 offset);
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
				     u64 *page_list, int num_pages);

int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
		    union pvrdma_cmd_resp *rsp, unsigned resp_code);
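
/*
 * pvrdma_cmd_post() is the command-channel entry point. Broadly, it
 * serializes callers, copies the request into cmd_slot, signals the
 * device, and waits for the reply in resp_slot, checking its ack against
 * resp_code. This is only a sketch of the flow; see the command-channel
 * implementation for the details.
 */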

#endif /* __PVRDMA_H__ */