// SPDX-License-Identifier: GPL-2.0-only
/*
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 * Copyright (C) 2008 by Tom Tucker <[email protected]>
 * Copyright (C) 2006 by Russ Cox <[email protected]>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <[email protected]>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <[email protected]>
 * Copyright (C) 1997-2002 by Ron Minnich <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(1024*1024)	/* 1MB */

/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @timeout: Number of msecs to wait for connection management events
 * @privport: Whether a privileged port may be used
 * @port: The port to use
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_sem: Semaphore for the RQ
 * @excess_rc: Amount of posted Receive Contexts without a pending request.
 *	       See rdma_request()
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	long timeout;
	bool privport;
	u16 port;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	struct semaphore rq_sem;
	atomic_t excess_rc;
	struct sockaddr_in addr;
	spinlock_t req_lock;
	struct completion cm_done;
};

struct p9_rdma_req;

/**
 * struct p9_rdma_context - Keeps track of in-process WR
 *
 * @cqe: completion queue entry
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_context {
	struct ib_cqe cqe;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall rc;
	};
};

/**
 * struct p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @privport: Whether a privileged port may be used
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 *	      to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	bool privport;
	int sq_depth;
	int rq_depth;
	long timeout;
};

/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
	/* Options that take no argument */
	Opt_privport,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
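
/* Transport show_options method: print any mount options that differ from
 * their defaults, in the same "key=value" form that parse_opts() accepts.
 */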
static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt)
{
	struct p9_trans_rdma *rdma = clnt->trans;

	if (rdma->port != P9_PORT)
		seq_printf(m, ",port=%u", rdma->port);
	if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
		seq_printf(m, ",sq=%u", rdma->sq_depth);
	if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
		seq_printf(m, ",rq=%u", rdma->rq_depth);
	if (rdma->timeout != P9_RDMA_TIMEOUT)
		seq_printf(m, ",timeout=%lu", rdma->timeout);
	if (rdma->privport)
		seq_puts(m, ",privport");
	return 0;
}

/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;
	opts->privport = false;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		case Opt_privport:
			opts->privport = true;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}
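
/* RDMA CM event callback: drive the transport state machine forward on
 * address/route resolution and connection establishment, tear the
 * connection down on error or disconnect events, and wake any thread
 * waiting on &rdma->cm_done.
 */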
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}
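
/* Receive completion handler: unmap the reply buffer, match the reply to
 * its outstanding request by 9P tag and complete it via p9_client_cb();
 * on any failure, flag the transport as flushing and mark the client
 * disconnected.
 */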
static void
recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (wc->status != IB_WC_SUCCESS)
		goto err_out;

	c->rc.size = wc->byte_len;
	err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request.
	 */
	if (unlikely(req->rc.sdata)) {
		pr_err("Duplicate reply for request %d\n", tag);
		goto err_out;
	}

	req->rc.size = c->rc.size;
	req->rc.sdata = c->rc.sdata;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

out:
	up(&rdma->rq_sem);
	kfree(c);
	return;

err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		 req, err, wc->status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	goto out;
}
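
/* Send completion handler: unmap the request buffer, release one
 * send-queue slot and drop the reference the send path took on the
 * request.
 */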
static void
send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);

	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc.size,
			    DMA_TO_DEVICE);
	up(&rdma->sq_sem);
	p9_req_put(client, c->req);
	kfree(c);
}

static void qp_event_handler(struct ib_event *event, void *context)
{
	p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
		 event->event, context);
}
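
/* Release the RDMA resources in reverse order of creation. Tolerates a
 * partially constructed transport: each object is checked for validity
 * before it is destroyed.
 */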
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_free_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}
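
/* DMA-map the reply buffer and post it to the receive queue so the
 * server's response has somewhere to land; the mapping is undone if the
 * post fails.
 */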
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr;
	struct ib_sge sge;
	int ret;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc.sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	c->cqe.done = recv_done;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	ret = ib_post_recv(rdma->qp, &wr, NULL);
	if (ret)
		ib_dma_unmap_single(rdma->cm_id->device, c->busa,
				    client->msize, DMA_FROM_DEVICE);
	return ret;

error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
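
/* Transport request method: post a receive buffer for the reply (unless an
 * excess receive context left over from an earlier failed send can be
 * absorbed), then DMA-map the request and post it as a send WR. The rq/sq
 * semaphores bound the number of outstanding posts to the configured
 * queue depths.
 */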
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* When an error occurs between posting the recv and the send,
	 * there will be a receive context posted without a pending request.
	 * Since there is no way to "un-post" it, we remember it and skip
	 * post_recv() for the next request.
	 * So here,
	 * see if we are this `next request' and need to absorb an excess rc.
	 * If yes, then drop and free our own, and do not post_recv().
	 */
	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
		if (atomic_sub_return(1, &rdma->excess_rc) >= 0) {
			/* Got one! */
			p9_fcall_fini(&req->rc);
			req->rc.sdata = NULL;
			goto dont_need_post_recv;
		} else {
			/* We raced and lost. */
			atomic_inc(&rdma->excess_rc);
		}
	}

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto recv_error;
	}
	rpl_context->rc.sdata = req->rc.sdata;

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (down_interruptible(&rdma->rq_sem)) {
		err = -EINTR;
		goto recv_error;
	}

	err = post_recv(client, rpl_context);
	if (err) {
		p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
		goto recv_error;
	}
	/* remove posted receive buffer from request structure */
	req->rc.sdata = NULL;

dont_need_post_recv:
	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto send_error;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc.sdata, c->req->tc.size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		err = -EIO;
		goto send_error;
	}

	c->cqe.done = send_done;

	sge.addr = c->busa;
	sge.length = c->req->tc.size;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem)) {
		err = -EINTR;
		goto dma_unmap;
	}

	/* Mark request as `sent' *before* we actually send it,
	 * because doing it after could erase the REQ_STATUS_RCVD
	 * status in case of a very fast reply.
	 */
	WRITE_ONCE(req->status, REQ_STATUS_SENT);
	err = ib_post_send(rdma->qp, &wr, NULL);
	if (err)
		goto dma_unmap;

	/* Success */
	return 0;

dma_unmap:
	ib_dma_unmap_single(rdma->cm_id->device, c->busa,
			    c->req->tc.size, DMA_TO_DEVICE);
	/* Handle errors that happened during or while preparing the send: */
send_error:
	WRITE_ONCE(req->status, REQ_STATUS_ERROR);
	kfree(c);
	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);

	/* Ach.
	 * We did post_recv(), but not the send. We have one recv posted in
	 * excess.
	 */
	atomic_inc(&rdma->excess_rc);
	return err;

	/* Handle errors that happened during or while preparing post_recv(): */
recv_error:
	kfree(rpl_context);
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}
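
/* Transport close method: mark the client disconnected, break the RDMA
 * connection and free every transport resource.
 */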
static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}

/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->port = opts->port;
	rdma->privport = opts->privport;
	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	sema_init(&rdma->rq_sem, rdma->rq_depth);
	atomic_set(&rdma->excess_rc, 0);

	return rdma;
}

static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	/* Nothing to do here.
	 * We will take care of it (if we have to) in rdma_cancelled()
	 */
	return 1;
}

/* A request has been fully flushed without a reply.
 * That means we have posted one buffer in excess.
 */
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;

	atomic_inc(&rdma->excess_rc);
	return 0;
}
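
/* Bind the CM ID to a free privileged port, scanning downward from
 * P9_DEF_MAX_RESVPORT until a bind no longer fails with -EADDRINUSE.
 */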
static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	struct sockaddr_in cl = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int port, err = -EINVAL;

	for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
		cl.sin_port = htons((ushort)port);
		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}

/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;

	if (addr == NULL)
		return -EINVAL;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Bind to a privileged port if we need to */
	if (opts.privport) {
		err = p9_rdma_bind_privport(rdma);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport: %d\n",
			       __func__, task_pid_nr(current), -err);
			goto error;
		}
	}

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
				   opts.sq_depth + opts.rq_depth + 1,
				   IB_POLL_SOFTIRQ);
	if (IS_ERR(rdma->cq))
		goto error;

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}

static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.pooled_rbuffers = true,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
	.cancelled = rdma_cancelled,
	.show_options = p9_rdma_show_options,
};

/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_ALIAS_9P("rdma");
MODULE_AUTHOR("Tom Tucker <[email protected]>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");