rtrs.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * RDMA Transport Layer
  4. *
  5. * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
  6. * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
  7. * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
  8. */
  9. #undef pr_fmt
  10. #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
  11. #include <linux/module.h>
  12. #include <linux/inet.h>
  13. #include "rtrs-pri.h"
  14. #include "rtrs-log.h"
  15. MODULE_DESCRIPTION("RDMA Transport Core");
  16. MODULE_LICENSE("GPL");
  17. struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
  18. struct ib_device *dma_dev,
  19. enum dma_data_direction dir,
  20. void (*done)(struct ib_cq *cq, struct ib_wc *wc))
  21. {
  22. struct rtrs_iu *ius, *iu;
  23. int i;
  24. ius = kcalloc(iu_num, sizeof(*ius), gfp_mask);
  25. if (!ius)
  26. return NULL;
  27. for (i = 0; i < iu_num; i++) {
  28. iu = &ius[i];
  29. iu->direction = dir;
  30. iu->buf = kzalloc(size, gfp_mask);
  31. if (!iu->buf)
  32. goto err;
  33. iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
  34. if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
  35. kfree(iu->buf);
  36. goto err;
  37. }
  38. iu->cqe.done = done;
  39. iu->size = size;
  40. }
  41. return ius;
  42. err:
  43. rtrs_iu_free(ius, dma_dev, i);
  44. return NULL;
  45. }
  46. EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
  47. void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_num)
  48. {
  49. struct rtrs_iu *iu;
  50. int i;
  51. if (!ius)
  52. return;
  53. for (i = 0; i < queue_num; i++) {
  54. iu = &ius[i];
  55. ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, iu->direction);
  56. kfree(iu->buf);
  57. }
  58. kfree(ius);
  59. }
  60. EXPORT_SYMBOL_GPL(rtrs_iu_free);
  61. int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
  62. {
  63. struct rtrs_path *path = con->path;
  64. struct ib_recv_wr wr;
  65. struct ib_sge list;
  66. list.addr = iu->dma_addr;
  67. list.length = iu->size;
  68. list.lkey = path->dev->ib_pd->local_dma_lkey;
  69. if (list.length == 0) {
  70. rtrs_wrn(con->path,
  71. "Posting receive work request failed, sg list is empty\n");
  72. return -EINVAL;
  73. }
  74. wr = (struct ib_recv_wr) {
  75. .wr_cqe = &iu->cqe,
  76. .sg_list = &list,
  77. .num_sge = 1,
  78. };
  79. return ib_post_recv(con->qp, &wr, NULL);
  80. }
  81. EXPORT_SYMBOL_GPL(rtrs_iu_post_recv);
  82. int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
  83. {
  84. struct ib_recv_wr wr;
  85. wr = (struct ib_recv_wr) {
  86. .wr_cqe = cqe,
  87. };
  88. return ib_post_recv(con->qp, &wr, NULL);
  89. }
  90. EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
  91. static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
  92. struct ib_send_wr *wr, struct ib_send_wr *tail)
  93. {
  94. if (head) {
  95. struct ib_send_wr *next = head;
  96. while (next->next)
  97. next = next->next;
  98. next->next = wr;
  99. } else {
  100. head = wr;
  101. }
  102. if (tail)
  103. wr->next = tail;
  104. return ib_post_send(qp, head, NULL);
  105. }
  106. int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
  107. struct ib_send_wr *head)
  108. {
  109. struct rtrs_path *path = con->path;
  110. struct ib_send_wr wr;
  111. struct ib_sge list;
  112. if (WARN_ON(size == 0))
  113. return -EINVAL;
  114. list.addr = iu->dma_addr;
  115. list.length = size;
  116. list.lkey = path->dev->ib_pd->local_dma_lkey;
  117. wr = (struct ib_send_wr) {
  118. .wr_cqe = &iu->cqe,
  119. .sg_list = &list,
  120. .num_sge = 1,
  121. .opcode = IB_WR_SEND,
  122. .send_flags = IB_SEND_SIGNALED,
  123. };
  124. return rtrs_post_send(con->qp, head, &wr, NULL);
  125. }
  126. EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
  127. int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
  128. struct ib_sge *sge, unsigned int num_sge,
  129. u32 rkey, u64 rdma_addr, u32 imm_data,
  130. enum ib_send_flags flags,
  131. struct ib_send_wr *head,
  132. struct ib_send_wr *tail)
  133. {
  134. struct ib_rdma_wr wr;
  135. int i;
  136. wr = (struct ib_rdma_wr) {
  137. .wr.wr_cqe = &iu->cqe,
  138. .wr.sg_list = sge,
  139. .wr.num_sge = num_sge,
  140. .rkey = rkey,
  141. .remote_addr = rdma_addr,
  142. .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
  143. .wr.ex.imm_data = cpu_to_be32(imm_data),
  144. .wr.send_flags = flags,
  145. };
  146. /*
  147. * If one of the sges has 0 size, the operation will fail with a
  148. * length error
  149. */
  150. for (i = 0; i < num_sge; i++)
  151. if (WARN_ONCE(sge[i].length == 0, "sg %d is zero length\n", i))
  152. return -EINVAL;
  153. return rtrs_post_send(con->qp, head, &wr.wr, tail);
  154. }
  155. EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
/*
 * rtrs_post_rdma_write_imm_empty() - post a payload-less
 * RDMA_WRITE_WITH_IMM, used to deliver only immediate data (e.g. the
 * heartbeat messages) to the peer.
 */
static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
					  struct ib_cqe *cqe,
					  u32 imm_data,
					  struct ib_send_wr *head)
{
	struct ib_rdma_wr wr;
	struct rtrs_path *path = con->path;
	enum ib_send_flags sflags;

	/* Consume one send-queue slot without ever going negative. */
	atomic_dec_if_positive(&con->sq_wr_avail);
	/* Request a completion only on every signal_interval-th WR. */
	sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ?
		0 : IB_SEND_SIGNALED;

	wr = (struct ib_rdma_wr) {
		.wr.wr_cqe	= cqe,
		.wr.send_flags	= sflags,
		.wr.opcode	= IB_WR_RDMA_WRITE_WITH_IMM,
		.wr.ex.imm_data	= cpu_to_be32(imm_data),
	};

	return rtrs_post_send(con->qp, head, &wr.wr, NULL);
}
  175. static void qp_event_handler(struct ib_event *ev, void *ctx)
  176. {
  177. struct rtrs_con *con = ctx;
  178. switch (ev->event) {
  179. case IB_EVENT_COMM_EST:
  180. rtrs_info(con->path, "QP event %s (%d) received\n",
  181. ib_event_msg(ev->event), ev->event);
  182. rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
  183. break;
  184. default:
  185. rtrs_info(con->path, "Unhandled QP event %s (%d) received\n",
  186. ib_event_msg(ev->event), ev->event);
  187. break;
  188. }
  189. }
/*
 * Connections with cid at or beyond irq_con_num use a polled CQ instead
 * of an IRQ-driven one (see create_cq()/destroy_cq()).
 */
static bool is_pollqueue(struct rtrs_con *con)
{
	return con->cid >= con->path->irq_con_num;
}
  194. static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
  195. enum ib_poll_context poll_ctx)
  196. {
  197. struct rdma_cm_id *cm_id = con->cm_id;
  198. struct ib_cq *cq;
  199. if (is_pollqueue(con))
  200. cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
  201. poll_ctx);
  202. else
  203. cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
  204. if (IS_ERR(cq)) {
  205. rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
  206. PTR_ERR(cq));
  207. return PTR_ERR(cq);
  208. }
  209. con->cq = cq;
  210. con->nr_cqe = nr_cqe;
  211. return 0;
  212. }
  213. static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
  214. u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
  215. {
  216. struct ib_qp_init_attr init_attr = {NULL};
  217. struct rdma_cm_id *cm_id = con->cm_id;
  218. int ret;
  219. init_attr.cap.max_send_wr = max_send_wr;
  220. init_attr.cap.max_recv_wr = max_recv_wr;
  221. init_attr.cap.max_recv_sge = 1;
  222. init_attr.event_handler = qp_event_handler;
  223. init_attr.qp_context = con;
  224. init_attr.cap.max_send_sge = max_sge;
  225. init_attr.qp_type = IB_QPT_RC;
  226. init_attr.send_cq = con->cq;
  227. init_attr.recv_cq = con->cq;
  228. init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
  229. ret = rdma_create_qp(cm_id, pd, &init_attr);
  230. if (ret) {
  231. rtrs_err(con->path, "Creating QP failed, err: %d\n", ret);
  232. return ret;
  233. }
  234. con->qp = cm_id->qp;
  235. return ret;
  236. }
  237. static void destroy_cq(struct rtrs_con *con)
  238. {
  239. if (con->cq) {
  240. if (is_pollqueue(con))
  241. ib_free_cq(con->cq);
  242. else
  243. ib_cq_pool_put(con->cq, con->nr_cqe);
  244. }
  245. con->cq = NULL;
  246. }
  247. int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
  248. u32 max_send_sge, int cq_vector, int nr_cqe,
  249. u32 max_send_wr, u32 max_recv_wr,
  250. enum ib_poll_context poll_ctx)
  251. {
  252. int err;
  253. err = create_cq(con, cq_vector, nr_cqe, poll_ctx);
  254. if (err)
  255. return err;
  256. err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr,
  257. max_send_sge);
  258. if (err) {
  259. destroy_cq(con);
  260. return err;
  261. }
  262. con->path = path;
  263. return 0;
  264. }
  265. EXPORT_SYMBOL_GPL(rtrs_cq_qp_create);
/*
 * rtrs_cq_qp_destroy() - tear down a connection's QP and CQ.
 *
 * Safe to call when the QP was never created; destroy_cq() likewise
 * tolerates a missing CQ.
 */
void rtrs_cq_qp_destroy(struct rtrs_con *con)
{
	if (con->qp) {
		rdma_destroy_qp(con->cm_id);
		con->qp = NULL;
	}
	destroy_cq(con);
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
/* (Re)arm the heartbeat work to run after the configured interval. */
static void schedule_hb(struct rtrs_path *path)
{
	queue_delayed_work(path->hb_wq, &path->hb_dwork,
			   msecs_to_jiffies(path->hb_interval_ms));
}
  280. void rtrs_send_hb_ack(struct rtrs_path *path)
  281. {
  282. struct rtrs_con *usr_con = path->con[0];
  283. u32 imm;
  284. int err;
  285. imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
  286. err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
  287. NULL);
  288. if (err) {
  289. rtrs_err(path, "send HB ACK failed, errno: %d\n", err);
  290. path->hb_err_handler(usr_con);
  291. return;
  292. }
  293. }
  294. EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
/*
 * hb_work() - periodic heartbeat worker.
 *
 * A heartbeat is only sent when hb_missed_cnt is zero (presumably reset
 * elsewhere when the peer's ack arrives — confirm against the ack path);
 * otherwise the worker just increments the miss counter and reschedules.
 * Once the counter exceeds hb_missed_max the error handler fires and the
 * heartbeat stops rescheduling itself.
 */
static void hb_work(struct work_struct *work)
{
	struct rtrs_con *usr_con;
	struct rtrs_path *path;
	u32 imm;
	int err;

	path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
	usr_con = path->con[0];
	if (path->hb_missed_cnt > path->hb_missed_max) {
		rtrs_err(path, "HB missed max reached.\n");
		path->hb_err_handler(usr_con);
		return;
	}
	/* Post-increment: send only when the previous HB was answered. */
	if (path->hb_missed_cnt++) {
		/* Reschedule work without sending hb */
		schedule_hb(path);
		return;
	}

	path->hb_last_sent = ktime_get();

	imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
	err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
					     NULL);
	if (err) {
		rtrs_err(path, "HB send failed, errno: %d\n", err);
		path->hb_err_handler(usr_con);
		return;
	}

	schedule_hb(path);
}
  324. void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
  325. unsigned int interval_ms, unsigned int missed_max,
  326. void (*err_handler)(struct rtrs_con *con),
  327. struct workqueue_struct *wq)
  328. {
  329. path->hb_cqe = cqe;
  330. path->hb_interval_ms = interval_ms;
  331. path->hb_err_handler = err_handler;
  332. path->hb_wq = wq;
  333. path->hb_missed_max = missed_max;
  334. path->hb_missed_cnt = 0;
  335. INIT_DELAYED_WORK(&path->hb_dwork, hb_work);
  336. }
  337. EXPORT_SYMBOL_GPL(rtrs_init_hb);
/* Kick off the heartbeat by scheduling the first hb_work run. */
void rtrs_start_hb(struct rtrs_path *path)
{
	schedule_hb(path);
}
EXPORT_SYMBOL_GPL(rtrs_start_hb);
/*
 * Stop the heartbeat: wait for any in-flight hb_work to finish, then
 * clear the miss counter so a later rtrs_start_hb() starts clean.
 */
void rtrs_stop_hb(struct rtrs_path *path)
{
	cancel_delayed_work_sync(&path->hb_dwork);
	path->hb_missed_cnt = 0;
}
EXPORT_SYMBOL_GPL(rtrs_stop_hb);
  349. static int rtrs_str_gid_to_sockaddr(const char *addr, size_t len,
  350. short port, struct sockaddr_storage *dst)
  351. {
  352. struct sockaddr_ib *dst_ib = (struct sockaddr_ib *)dst;
  353. int ret;
  354. /*
  355. * We can use some of the IPv6 functions since GID is a valid
  356. * IPv6 address format
  357. */
  358. ret = in6_pton(addr, len, dst_ib->sib_addr.sib_raw, '\0', NULL);
  359. if (ret == 0)
  360. return -EINVAL;
  361. dst_ib->sib_family = AF_IB;
  362. /*
  363. * Use the same TCP server port number as the IB service ID
  364. * on the IB port space range
  365. */
  366. dst_ib->sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port);
  367. dst_ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
  368. dst_ib->sib_pkey = cpu_to_be16(0xffff);
  369. return 0;
  370. }
  371. /**
  372. * rtrs_str_to_sockaddr() - Convert rtrs address string to sockaddr
  373. * @addr: String representation of an addr (IPv4, IPv6 or IB GID):
  374. * - "ip:192.168.1.1"
  375. * - "ip:fe80::200:5aee:feaa:20a2"
  376. * - "gid:fe80::200:5aee:feaa:20a2"
  377. * @len: String address length
  378. * @port: Destination port
  379. * @dst: Destination sockaddr structure
  380. *
  381. * Returns 0 if conversion successful. Non-zero on error.
  382. */
  383. static int rtrs_str_to_sockaddr(const char *addr, size_t len,
  384. u16 port, struct sockaddr_storage *dst)
  385. {
  386. if (strncmp(addr, "gid:", 4) == 0) {
  387. return rtrs_str_gid_to_sockaddr(addr + 4, len - 4, port, dst);
  388. } else if (strncmp(addr, "ip:", 3) == 0) {
  389. char port_str[8];
  390. char *cpy;
  391. int err;
  392. snprintf(port_str, sizeof(port_str), "%u", port);
  393. cpy = kstrndup(addr + 3, len - 3, GFP_KERNEL);
  394. err = cpy ? inet_pton_with_scope(&init_net, AF_UNSPEC,
  395. cpy, port_str, dst) : -ENOMEM;
  396. kfree(cpy);
  397. return err;
  398. }
  399. return -EPROTONOSUPPORT;
  400. }
  401. /**
  402. * sockaddr_to_str() - convert sockaddr to a string.
  403. * @addr: the sockadddr structure to be converted.
  404. * @buf: string containing socket addr.
  405. * @len: string length.
  406. *
  407. * The return value is the number of characters written into buf not
  408. * including the trailing '\0'. If len is == 0 the function returns 0..
  409. */
  410. int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
  411. {
  412. switch (addr->sa_family) {
  413. case AF_IB:
  414. return scnprintf(buf, len, "gid:%pI6",
  415. &((struct sockaddr_ib *)addr)->sib_addr.sib_raw);
  416. case AF_INET:
  417. return scnprintf(buf, len, "ip:%pI4",
  418. &((struct sockaddr_in *)addr)->sin_addr);
  419. case AF_INET6:
  420. return scnprintf(buf, len, "ip:%pI6c",
  421. &((struct sockaddr_in6 *)addr)->sin6_addr);
  422. }
  423. return scnprintf(buf, len, "<invalid address family>");
  424. }
  425. EXPORT_SYMBOL(sockaddr_to_str);
  426. /**
  427. * rtrs_addr_to_str() - convert rtrs_addr to a string "src@dst"
  428. * @addr: the rtrs_addr structure to be converted
  429. * @buf: string containing source and destination addr of a path
  430. * separated by '@' I.e. "ip:1.1.1.1@ip:1.1.1.2"
  431. * "ip:1.1.1.1@ip:1.1.1.2".
  432. * @len: string length
  433. *
  434. * The return value is the number of characters written into buf not
  435. * including the trailing '\0'.
  436. */
  437. int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len)
  438. {
  439. int cnt;
  440. cnt = sockaddr_to_str((struct sockaddr *)addr->src,
  441. buf, len);
  442. cnt += scnprintf(buf + cnt, len - cnt, "@");
  443. sockaddr_to_str((struct sockaddr *)addr->dst,
  444. buf + cnt, len - cnt);
  445. return cnt;
  446. }
  447. EXPORT_SYMBOL(rtrs_addr_to_str);
  448. /**
  449. * rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
  450. * to sockaddreses
  451. * @str: string containing source and destination addr of a path
  452. * separated by ',' or '@' I.e. "ip:1.1.1.1,ip:1.1.1.2" or
  453. * "ip:1.1.1.1@ip:1.1.1.2". If str contains only one address it's
  454. * considered to be destination.
  455. * @len: string length
  456. * @port: Destination port number.
  457. * @addr: will be set to the source/destination address or to NULL
  458. * if str doesn't contain any source address.
  459. *
  460. * Returns zero if conversion successful. Non-zero otherwise.
  461. */
  462. int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
  463. struct rtrs_addr *addr)
  464. {
  465. const char *d;
  466. d = strchr(str, ',');
  467. if (!d)
  468. d = strchr(str, '@');
  469. if (d) {
  470. if (rtrs_str_to_sockaddr(str, d - str, 0, addr->src))
  471. return -EINVAL;
  472. d += 1;
  473. len -= d - str;
  474. str = d;
  475. } else {
  476. addr->src = NULL;
  477. }
  478. return rtrs_str_to_sockaddr(str, len, port, addr->dst);
  479. }
  480. EXPORT_SYMBOL(rtrs_addr_to_sockaddr);
/*
 * rtrs_rdma_dev_pd_init() - initialize a device/PD pool.
 * @pd_flags:	flags passed to ib_alloc_pd() for pooled devices
 * @pool:	pool to initialize
 *
 * Custom ops->alloc and ops->free must be provided as a pair or not at
 * all (the XOR warns on a mismatched pair).
 */
void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
			   struct rtrs_rdma_dev_pd *pool)
{
	WARN_ON(pool->ops && (!pool->ops->alloc ^ !pool->ops->free));
	INIT_LIST_HEAD(&pool->list);
	mutex_init(&pool->mutex);
	pool->pd_flags = pd_flags;
}
EXPORT_SYMBOL(rtrs_rdma_dev_pd_init);
/* Tear down a device pool; warns if any device is still listed. */
void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)
{
	mutex_destroy(&pool->mutex);
	WARN_ON(!list_empty(&pool->list));
}
EXPORT_SYMBOL(rtrs_rdma_dev_pd_deinit);
/*
 * dev_free() - kref release callback for a pooled rtrs_ib_dev.
 *
 * Unlinks the device from its pool under the pool mutex, runs the
 * optional deinit hook, releases the PD and finally frees the device
 * through the pool's custom free hook (or kfree() when none is set).
 */
static void dev_free(struct kref *ref)
{
	struct rtrs_rdma_dev_pd *pool;
	struct rtrs_ib_dev *dev;

	dev = container_of(ref, typeof(*dev), ref);
	pool = dev->pool;

	mutex_lock(&pool->mutex);
	list_del(&dev->entry);
	mutex_unlock(&pool->mutex);

	if (pool->ops && pool->ops->deinit)
		pool->ops->deinit(dev);

	ib_dealloc_pd(dev->ib_pd);

	if (pool->ops && pool->ops->free)
		pool->ops->free(dev);
	else
		kfree(dev);
}
/* Drop a device reference; returns non-zero if this released the last one. */
int rtrs_ib_dev_put(struct rtrs_ib_dev *dev)
{
	return kref_put(&dev->ref, dev_free);
}
EXPORT_SYMBOL(rtrs_ib_dev_put);
/* Take a reference, failing if the device is already being released. */
static int rtrs_ib_dev_get(struct rtrs_ib_dev *dev)
{
	return kref_get_unless_zero(&dev->ref);
}
/*
 * rtrs_ib_dev_find_or_add() - look up @ib_dev in @pool or add it.
 *
 * Returns a referenced rtrs_ib_dev on success, NULL on failure (both
 * allocation failure and an ERR_PTR from ops->alloc map to NULL).
 *
 * NOTE(review): the pool mutex is dropped between the failed lookup and
 * the re-insertion below, so two concurrent callers may each add an
 * entry for the same node_guid — presumably benign for the callers;
 * confirm.
 */
struct rtrs_ib_dev *
rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
			struct rtrs_rdma_dev_pd *pool)
{
	struct rtrs_ib_dev *dev;

	mutex_lock(&pool->mutex);
	list_for_each_entry(dev, &pool->list, entry) {
		/* Reuse only if a reference can still be taken. */
		if (dev->ib_dev->node_guid == ib_dev->node_guid &&
		    rtrs_ib_dev_get(dev))
			goto out_unlock;
	}
	mutex_unlock(&pool->mutex);

	/* Not pooled yet: allocate via the custom hook or plain kzalloc. */
	if (pool->ops && pool->ops->alloc)
		dev = pool->ops->alloc();
	else
		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (IS_ERR_OR_NULL(dev))
		goto out_err;

	kref_init(&dev->ref);
	dev->pool = pool;
	dev->ib_dev = ib_dev;
	dev->ib_pd = ib_alloc_pd(ib_dev, pool->pd_flags);
	if (IS_ERR(dev->ib_pd))
		goto out_free_dev;

	if (pool->ops && pool->ops->init && pool->ops->init(dev))
		goto out_free_pd;

	mutex_lock(&pool->mutex);
	list_add(&dev->entry, &pool->list);
out_unlock:
	mutex_unlock(&pool->mutex);
	return dev;

out_free_pd:
	ib_dealloc_pd(dev->ib_pd);
out_free_dev:
	if (pool->ops && pool->ops->free)
		pool->ops->free(dev);
	else
		kfree(dev);
out_err:
	return NULL;
}
EXPORT_SYMBOL(rtrs_ib_dev_find_or_add);