svc_rdma_rw.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016-2018 Oracle. All rights reserved.
  4. *
  5. * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
  6. */
  7. #include <rdma/rw.h>
  8. #include <linux/sunrpc/xdr.h>
  9. #include <linux/sunrpc/rpc_rdma.h>
  10. #include <linux/sunrpc/svc_rdma.h>
  11. #include "xprt_rdma.h"
  12. #include <trace/events/rpcrdma.h>
  13. static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
  14. static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
  15. /* Each R/W context contains state for one chain of RDMA Read or
  16. * Write Work Requests.
  17. *
  18. * Each WR chain handles a single contiguous server-side buffer,
  19. * because scatterlist entries after the first have to start on
  20. * page alignment. xdr_buf iovecs cannot guarantee alignment.
  21. *
  22. * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
  23. * from a client may contain a unique R_key, so each WR chain moves
  24. * up to one segment at a time.
  25. *
  26. * The scatterlist makes this data structure over 4KB in size. To
  27. * make it less likely to fail, and to handle the allocation for
  28. * smaller I/O requests without disabling bottom-halves, these
  29. * contexts are created on demand, but cached and reused until the
  30. * controlling svcxprt_rdma is destroyed.
  31. */
  32. struct svc_rdma_rw_ctxt {
  33. struct llist_node rw_node;
  34. struct list_head rw_list;
  35. struct rdma_rw_ctx rw_ctx;
  36. unsigned int rw_nents;
  37. struct sg_table rw_sg_table;
  38. struct scatterlist rw_first_sgl[];
  39. };
  40. static inline struct svc_rdma_rw_ctxt *
  41. svc_rdma_next_ctxt(struct list_head *list)
  42. {
  43. return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
  44. rw_list);
  45. }
  46. static struct svc_rdma_rw_ctxt *
  47. svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
  48. {
  49. struct svc_rdma_rw_ctxt *ctxt;
  50. struct llist_node *node;
  51. spin_lock(&rdma->sc_rw_ctxt_lock);
  52. node = llist_del_first(&rdma->sc_rw_ctxts);
  53. spin_unlock(&rdma->sc_rw_ctxt_lock);
  54. if (node) {
  55. ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
  56. } else {
  57. ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
  58. GFP_KERNEL);
  59. if (!ctxt)
  60. goto out_noctx;
  61. INIT_LIST_HEAD(&ctxt->rw_list);
  62. }
  63. ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
  64. if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
  65. ctxt->rw_sg_table.sgl,
  66. SG_CHUNK_SIZE))
  67. goto out_free;
  68. return ctxt;
  69. out_free:
  70. kfree(ctxt);
  71. out_noctx:
  72. trace_svcrdma_no_rwctx_err(rdma, sges);
  73. return NULL;
  74. }
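/* How the allocation above appears to be sized: each context embeds
 * SG_CHUNK_SIZE scatterlist entries in rw_first_sgl[], roughly
 *
 *	ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
 *		       GFP_KERNEL);
 *
 * and that inline array is handed to sg_alloc_table_chained() as the
 * first chunk. Requests needing no more than SG_CHUNK_SIZE entries
 * therefore take no extra allocation; larger requests chain additional
 * scatterlist pages onto the embedded chunk.
 */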
  75. static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
  76. struct svc_rdma_rw_ctxt *ctxt,
  77. struct llist_head *list)
  78. {
  79. sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
  80. llist_add(&ctxt->rw_node, list);
  81. }
  82. static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
  83. struct svc_rdma_rw_ctxt *ctxt)
  84. {
  85. __svc_rdma_put_rw_ctxt(rdma, ctxt, &rdma->sc_rw_ctxts);
  86. }
  87. /**
  88. * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
  89. * @rdma: transport about to be destroyed
  90. *
  91. */
  92. void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
  93. {
  94. struct svc_rdma_rw_ctxt *ctxt;
  95. struct llist_node *node;
  96. while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) {
  97. ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
  98. kfree(ctxt);
  99. }
  100. }
  101. /**
  102. * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
  103. * @rdma: controlling transport instance
  104. * @ctxt: R/W context to prepare
  105. * @offset: RDMA offset
  106. * @handle: RDMA tag/handle
  107. * @direction: I/O direction
  108. *
  109. * Returns, on success, the number of Send Queue WQEs that will be
  110. * needed to post this I/O, or a negative errno.
  111. */
  112. static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
  113. struct svc_rdma_rw_ctxt *ctxt,
  114. u64 offset, u32 handle,
  115. enum dma_data_direction direction)
  116. {
  117. int ret;
  118. ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
  119. ctxt->rw_sg_table.sgl, ctxt->rw_nents,
  120. 0, offset, handle, direction);
  121. if (unlikely(ret < 0)) {
  122. svc_rdma_put_rw_ctxt(rdma, ctxt);
  123. trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
  124. }
  125. return ret;
  126. }
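/* A positive return value from svc_rdma_rw_ctx_init() is the number of
 * Send Queue entries this R/W context will consume. The calling pattern
 * used throughout this file is roughly:
 *
 *	ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle, dir);
 *	if (ret < 0)
 *		return -EIO;
 *	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 *	cc->cc_sqecount += ret;
 *
 * so that svc_rdma_post_chunk_ctxt() can reserve the whole chain
 * against rdma->sc_sq_avail before posting.
 */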
  127. /* A chunk context tracks all I/O for moving one Read or Write
  128. * chunk. This is a set of rdma_rw's that handle data movement
  129. * for all segments of one chunk.
  130. *
  131. * These are small, acquired with a single allocator call, and
  132. * no more than one is needed per chunk. They are allocated on
  133. * demand, and not cached.
  134. */
  135. struct svc_rdma_chunk_ctxt {
  136. struct rpc_rdma_cid cc_cid;
  137. struct ib_cqe cc_cqe;
  138. struct svcxprt_rdma *cc_rdma;
  139. struct list_head cc_rwctxts;
  140. ktime_t cc_posttime;
  141. int cc_sqecount;
  142. enum ib_wc_status cc_status;
  143. struct completion cc_done;
  144. };
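/* The embedded cc_cqe is what routes a completion back to its chunk
 * context: the last WR in the posted chain carries &cc->cc_cqe, and
 * each handler recovers the context with, for example,
 *
 *	struct svc_rdma_chunk_ctxt *cc =
 *		container_of(wc->wr_cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
 *
 * cc_sqecount accumulates the SQ slots needed by every rw_ctx on
 * cc_rwctxts so they can be reserved and released as a unit.
 */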
  145. static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
  146. struct rpc_rdma_cid *cid)
  147. {
  148. cid->ci_queue_id = rdma->sc_sq_cq->res.id;
  149. cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
  150. }
  151. static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
  152. struct svc_rdma_chunk_ctxt *cc)
  153. {
  154. svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
  155. cc->cc_rdma = rdma;
  156. INIT_LIST_HEAD(&cc->cc_rwctxts);
  157. cc->cc_sqecount = 0;
  158. }
  159. /*
  160. * The consumed rw_ctx's are cleaned and placed on a local llist so
  161. * that only one atomic llist operation is needed to put them all
  162. * back on the free list.
  163. */
  164. static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
  165. enum dma_data_direction dir)
  166. {
  167. struct svcxprt_rdma *rdma = cc->cc_rdma;
  168. struct llist_node *first, *last;
  169. struct svc_rdma_rw_ctxt *ctxt;
  170. LLIST_HEAD(free);
  171. first = last = NULL;
  172. while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
  173. list_del(&ctxt->rw_list);
  174. rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
  175. rdma->sc_port_num, ctxt->rw_sg_table.sgl,
  176. ctxt->rw_nents, dir);
  177. __svc_rdma_put_rw_ctxt(rdma, ctxt, &free);
  178. ctxt->rw_node.next = first;
  179. first = &ctxt->rw_node;
  180. if (!last)
  181. last = first;
  182. }
  183. if (first)
  184. llist_add_batch(first, last, &rdma->sc_rw_ctxts);
  185. }
  186. /* State for sending a Write or Reply chunk.
  187. * - Tracks progress of writing one chunk over all its segments
  188. * - Stores arguments for the SGL constructor functions
  189. */
  190. struct svc_rdma_write_info {
  191. const struct svc_rdma_chunk *wi_chunk;
  192. /* write state of this chunk */
  193. unsigned int wi_seg_off;
  194. unsigned int wi_seg_no;
  195. /* SGL constructor arguments */
  196. const struct xdr_buf *wi_xdr;
  197. unsigned char *wi_base;
  198. unsigned int wi_next_off;
  199. struct svc_rdma_chunk_ctxt wi_cc;
  200. };
  201. static struct svc_rdma_write_info *
  202. svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
  203. const struct svc_rdma_chunk *chunk)
  204. {
  205. struct svc_rdma_write_info *info;
  206. info = kmalloc(sizeof(*info), GFP_KERNEL);
  207. if (!info)
  208. return info;
  209. info->wi_chunk = chunk;
  210. info->wi_seg_off = 0;
  211. info->wi_seg_no = 0;
  212. svc_rdma_cc_init(rdma, &info->wi_cc);
  213. info->wi_cc.cc_cqe.done = svc_rdma_write_done;
  214. return info;
  215. }
  216. static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
  217. {
  218. svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
  219. kfree(info);
  220. }
  221. /**
  222. * svc_rdma_write_done - Write chunk completion
  223. * @cq: controlling Completion Queue
  224. * @wc: Work Completion
  225. *
  226. * Pages under I/O are freed by a subsequent Send completion.
  227. */
  228. static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
  229. {
  230. struct ib_cqe *cqe = wc->wr_cqe;
  231. struct svc_rdma_chunk_ctxt *cc =
  232. container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
  233. struct svcxprt_rdma *rdma = cc->cc_rdma;
  234. struct svc_rdma_write_info *info =
  235. container_of(cc, struct svc_rdma_write_info, wi_cc);
  236. switch (wc->status) {
  237. case IB_WC_SUCCESS:
  238. trace_svcrdma_wc_write(wc, &cc->cc_cid);
  239. break;
  240. case IB_WC_WR_FLUSH_ERR:
  241. trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
  242. break;
  243. default:
  244. trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
  245. }
  246. svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
  247. if (unlikely(wc->status != IB_WC_SUCCESS))
  248. svc_xprt_deferred_close(&rdma->sc_xprt);
  249. svc_rdma_write_info_free(info);
  250. }
  251. /* State for pulling a Read chunk.
  252. */
  253. struct svc_rdma_read_info {
  254. struct svc_rqst *ri_rqst;
  255. struct svc_rdma_recv_ctxt *ri_readctxt;
  256. unsigned int ri_pageno;
  257. unsigned int ri_pageoff;
  258. unsigned int ri_totalbytes;
  259. struct svc_rdma_chunk_ctxt ri_cc;
  260. };
  261. static struct svc_rdma_read_info *
  262. svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
  263. {
  264. struct svc_rdma_read_info *info;
  265. info = kmalloc(sizeof(*info), GFP_KERNEL);
  266. if (!info)
  267. return info;
  268. svc_rdma_cc_init(rdma, &info->ri_cc);
  269. info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
  270. return info;
  271. }
  272. static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
  273. {
  274. svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
  275. kfree(info);
  276. }
  277. /**
  278. * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
  279. * @cq: controlling Completion Queue
  280. * @wc: Work Completion
  281. *
  282. */
  283. static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
  284. {
  285. struct ib_cqe *cqe = wc->wr_cqe;
  286. struct svc_rdma_chunk_ctxt *cc =
  287. container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
  288. struct svc_rdma_read_info *info;
  289. switch (wc->status) {
  290. case IB_WC_SUCCESS:
  291. info = container_of(cc, struct svc_rdma_read_info, ri_cc);
  292. trace_svcrdma_wc_read(wc, &cc->cc_cid, info->ri_totalbytes,
  293. cc->cc_posttime);
  294. break;
  295. case IB_WC_WR_FLUSH_ERR:
  296. trace_svcrdma_wc_read_flush(wc, &cc->cc_cid);
  297. break;
  298. default:
  299. trace_svcrdma_wc_read_err(wc, &cc->cc_cid);
  300. }
  301. svc_rdma_wake_send_waiters(cc->cc_rdma, cc->cc_sqecount);
  302. cc->cc_status = wc->status;
  303. complete(&cc->cc_done);
  304. return;
  305. }
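/* Note that Read completion frees nothing here: the handler only
 * records wc->status and signals cc_done, because the nfsd thread in
 * svc_rdma_process_read_list() is sleeping in wait_for_completion()
 * and performs the release itself via svc_rdma_read_info_free().
 */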
  306. /* This function sleeps when the transport's Send Queue is congested.
  307. *
  308. * Assumptions:
  309. * - If ib_post_send() succeeds, only one completion is expected,
  310. * even if one or more WRs are flushed. This is true when posting
  311. * an rdma_rw_ctx or when posting a single signaled WR.
  312. */
  313. static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
  314. {
  315. struct svcxprt_rdma *rdma = cc->cc_rdma;
  316. struct ib_send_wr *first_wr;
  317. const struct ib_send_wr *bad_wr;
  318. struct list_head *tmp;
  319. struct ib_cqe *cqe;
  320. int ret;
  321. if (cc->cc_sqecount > rdma->sc_sq_depth)
  322. return -EINVAL;
  323. first_wr = NULL;
  324. cqe = &cc->cc_cqe;
  325. list_for_each(tmp, &cc->cc_rwctxts) {
  326. struct svc_rdma_rw_ctxt *ctxt;
  327. ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
  328. first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
  329. rdma->sc_port_num, cqe, first_wr);
  330. cqe = NULL;
  331. }
  332. do {
  333. if (atomic_sub_return(cc->cc_sqecount,
  334. &rdma->sc_sq_avail) > 0) {
  335. cc->cc_posttime = ktime_get();
  336. ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
  337. if (ret)
  338. break;
  339. return 0;
  340. }
  341. percpu_counter_inc(&svcrdma_stat_sq_starve);
  342. trace_svcrdma_sq_full(rdma);
  343. atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
  344. wait_event(rdma->sc_send_wait,
  345. atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
  346. trace_svcrdma_sq_retry(rdma);
  347. } while (1);
  348. trace_svcrdma_sq_post_err(rdma, ret);
  349. svc_xprt_deferred_close(&rdma->sc_xprt);
  350. /* If even one was posted, there will be a completion. */
  351. if (bad_wr != first_wr)
  352. return 0;
  353. atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
  354. wake_up(&rdma->sc_send_wait);
  355. return -ENOTCONN;
  356. }
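/* A sketch of the SQ accounting above: cc_sqecount slots are debited
 * optimistically and credited back if the Send Queue turns out to be
 * too full, roughly
 *
 *	if (atomic_sub_return(cc->cc_sqecount, &rdma->sc_sq_avail) > 0) {
 *		ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
 *	} else {
 *		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 *		wait_event(rdma->sc_send_wait, ...);	// then retry
 *	}
 *
 * The completion handlers in this file call svc_rdma_wake_send_waiters()
 * to return the slots and wake any sender parked in that wait_event().
 */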
  357. /* Build and DMA-map an SGL that covers one kvec in an xdr_buf
  358. */
  359. static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
  360. unsigned int len,
  361. struct svc_rdma_rw_ctxt *ctxt)
  362. {
  363. struct scatterlist *sg = ctxt->rw_sg_table.sgl;
  364. sg_set_buf(&sg[0], info->wi_base, len);
  365. info->wi_base += len;
  366. ctxt->rw_nents = 1;
  367. }
  368. /* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
  369. */
  370. static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
  371. unsigned int remaining,
  372. struct svc_rdma_rw_ctxt *ctxt)
  373. {
  374. unsigned int sge_no, sge_bytes, page_off, page_no;
  375. const struct xdr_buf *xdr = info->wi_xdr;
  376. struct scatterlist *sg;
  377. struct page **page;
  378. page_off = info->wi_next_off + xdr->page_base;
  379. page_no = page_off >> PAGE_SHIFT;
  380. page_off = offset_in_page(page_off);
  381. page = xdr->pages + page_no;
  382. info->wi_next_off += remaining;
  383. sg = ctxt->rw_sg_table.sgl;
  384. sge_no = 0;
  385. do {
  386. sge_bytes = min_t(unsigned int, remaining,
  387. PAGE_SIZE - page_off);
  388. sg_set_page(sg, *page, sge_bytes, page_off);
  389. remaining -= sge_bytes;
  390. sg = sg_next(sg);
  391. page_off = 0;
  392. sge_no++;
  393. page++;
  394. } while (remaining);
  395. ctxt->rw_nents = sge_no;
  396. }
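/* Worked example of the arithmetic above, assuming 4KB pages and
 * illustrative values: with xdr->page_base = 100 and wi_next_off = 8000,
 * page_off starts at 8100, so page_no = 8100 >> PAGE_SHIFT = 1 and
 * page_off = offset_in_page(8100) = 4004. The first SGE then covers the
 * last 92 bytes of pages[1], and every following SGE starts at offset 0
 * of the next page.
 */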
  397. /* Construct RDMA Write WRs to send a portion of an xdr_buf containing
  398. * an RPC Reply.
  399. */
  400. static int
  401. svc_rdma_build_writes(struct svc_rdma_write_info *info,
  402. void (*constructor)(struct svc_rdma_write_info *info,
  403. unsigned int len,
  404. struct svc_rdma_rw_ctxt *ctxt),
  405. unsigned int remaining)
  406. {
  407. struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
  408. struct svcxprt_rdma *rdma = cc->cc_rdma;
  409. const struct svc_rdma_segment *seg;
  410. struct svc_rdma_rw_ctxt *ctxt;
  411. int ret;
  412. do {
  413. unsigned int write_len;
  414. u64 offset;
  415. if (info->wi_seg_no >= info->wi_chunk->ch_segcount)
  416. goto out_overflow;
  417. seg = &info->wi_chunk->ch_segments[info->wi_seg_no];
  418. write_len = min(remaining, seg->rs_length - info->wi_seg_off);
  419. if (!write_len)
  420. goto out_overflow;
  421. ctxt = svc_rdma_get_rw_ctxt(rdma,
  422. (write_len >> PAGE_SHIFT) + 2);
  423. if (!ctxt)
  424. return -ENOMEM;
  425. constructor(info, write_len, ctxt);
  426. offset = seg->rs_offset + info->wi_seg_off;
  427. ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle,
  428. DMA_TO_DEVICE);
  429. if (ret < 0)
  430. return -EIO;
  431. percpu_counter_inc(&svcrdma_stat_write);
  432. list_add(&ctxt->rw_list, &cc->cc_rwctxts);
  433. cc->cc_sqecount += ret;
  434. if (write_len == seg->rs_length - info->wi_seg_off) {
  435. info->wi_seg_no++;
  436. info->wi_seg_off = 0;
  437. } else {
  438. info->wi_seg_off += write_len;
  439. }
  440. remaining -= write_len;
  441. } while (remaining);
  442. return 0;
  443. out_overflow:
  444. trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
  445. info->wi_chunk->ch_segcount);
  446. return -E2BIG;
  447. }
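/* The SGE estimate passed to svc_rdma_get_rw_ctxt() above,
 * (write_len >> PAGE_SHIFT) + 2, appears to be the worst case for an
 * unaligned range: e.g. a 9000-byte write starting mid-page can touch
 * four pages (9000 >> PAGE_SHIFT is 2, plus one partial page at each
 * end), and the kvec constructor's single SGE fits trivially.
 */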
  448. /**
  449. * svc_rdma_iov_write - Construct RDMA Writes from an iov
  450. * @info: pointer to write arguments
  451. * @iov: kvec to write
  452. *
  453. * Returns:
  454. * On success, returns zero
  455. * %-E2BIG if the client-provided Write chunk is too small
  456. * %-ENOMEM if a resource has been exhausted
  457. * %-EIO if an rdma-rw error occurred
  458. */
  459. static int svc_rdma_iov_write(struct svc_rdma_write_info *info,
  460. const struct kvec *iov)
  461. {
  462. info->wi_base = iov->iov_base;
  463. return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
  464. iov->iov_len);
  465. }
  466. /**
  467. * svc_rdma_pages_write - Construct RDMA Writes from pages
  468. * @info: pointer to write arguments
  469. * @xdr: xdr_buf with pages to write
  470. * @offset: offset into the content of @xdr
  471. * @length: number of bytes to write
  472. *
  473. * Returns:
  474. * On success, returns zero
  475. * %-E2BIG if the client-provided Write chunk is too small
  476. * %-ENOMEM if a resource has been exhausted
  477. * %-EIO if an rdma-rw error occurred
  478. */
  479. static int svc_rdma_pages_write(struct svc_rdma_write_info *info,
  480. const struct xdr_buf *xdr,
  481. unsigned int offset,
  482. unsigned long length)
  483. {
  484. info->wi_xdr = xdr;
  485. info->wi_next_off = offset - xdr->head[0].iov_len;
  486. return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
  487. length);
  488. }
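/* wi_next_off is an offset into the pagelist proper: @offset counts
 * from the start of the xdr_buf content, so the head kvec length is
 * subtracted here and xdr->page_base is added back in
 * svc_rdma_pagelist_to_sg(). E.g. with a 120-byte head and offset 120,
 * the Write begins at byte 0 of the pagelist.
 */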
  489. /**
  490. * svc_rdma_xb_write - Construct RDMA Writes to write an xdr_buf
  491. * @xdr: xdr_buf to write
  492. * @data: pointer to write arguments
  493. *
  494. * Returns:
  495. * On success, returns zero
  496. * %-E2BIG if the client-provided Write chunk is too small
  497. * %-ENOMEM if a resource has been exhausted
  498. * %-EIO if an rdma-rw error occurred
  499. */
  500. static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
  501. {
  502. struct svc_rdma_write_info *info = data;
  503. int ret;
  504. if (xdr->head[0].iov_len) {
  505. ret = svc_rdma_iov_write(info, &xdr->head[0]);
  506. if (ret < 0)
  507. return ret;
  508. }
  509. if (xdr->page_len) {
  510. ret = svc_rdma_pages_write(info, xdr, xdr->head[0].iov_len,
  511. xdr->page_len);
  512. if (ret < 0)
  513. return ret;
  514. }
  515. if (xdr->tail[0].iov_len) {
  516. ret = svc_rdma_iov_write(info, &xdr->tail[0]);
  517. if (ret < 0)
  518. return ret;
  519. }
  520. return xdr->len;
  521. }
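/* Note the success value: svc_rdma_xb_write() returns xdr->len rather
 * than zero, which lets svc_rdma_send_write_chunk() confirm the whole
 * payload was consumed with
 *
 *	ret = svc_rdma_xb_write(xdr, info);
 *	if (ret != xdr->len)
 *		goto out_err;
 *
 * while svc_rdma_send_reply_chunk() only checks for a negative result
 * from pcl_process_nonpayloads().
 */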
  522. /**
  523. * svc_rdma_send_write_chunk - Write all segments in a Write chunk
  524. * @rdma: controlling RDMA transport
  525. * @chunk: Write chunk provided by the client
  526. * @xdr: xdr_buf containing the data payload
  527. *
  528. * Returns a non-negative number of bytes the chunk consumed, or
  529. * %-E2BIG if the payload was larger than the Write chunk,
  530. * %-EINVAL if client provided too many segments,
  531. * %-ENOMEM if rdma_rw context pool was exhausted,
  532. * %-ENOTCONN if posting failed (connection is lost),
  533. * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
  534. */
  535. int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
  536. const struct svc_rdma_chunk *chunk,
  537. const struct xdr_buf *xdr)
  538. {
  539. struct svc_rdma_write_info *info;
  540. struct svc_rdma_chunk_ctxt *cc;
  541. int ret;
  542. info = svc_rdma_write_info_alloc(rdma, chunk);
  543. if (!info)
  544. return -ENOMEM;
  545. cc = &info->wi_cc;
  546. ret = svc_rdma_xb_write(xdr, info);
  547. if (ret != xdr->len)
  548. goto out_err;
  549. trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
  550. ret = svc_rdma_post_chunk_ctxt(cc);
  551. if (ret < 0)
  552. goto out_err;
  553. return xdr->len;
  554. out_err:
  555. svc_rdma_write_info_free(info);
  556. return ret;
  557. }
  558. /**
  559. * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
  560. * @rdma: controlling RDMA transport
  561. * @rctxt: Write and Reply chunks from client
  562. * @xdr: xdr_buf containing an RPC Reply
  563. *
  564. * Returns a non-negative number of bytes the chunk consumed, or
  565. * %-E2BIG if the payload was larger than the Reply chunk,
  566. * %-EINVAL if client provided too many segments,
  567. * %-ENOMEM if rdma_rw context pool was exhausted,
  568. * %-ENOTCONN if posting failed (connection is lost),
  569. * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
  570. */
  571. int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
  572. const struct svc_rdma_recv_ctxt *rctxt,
  573. const struct xdr_buf *xdr)
  574. {
  575. struct svc_rdma_write_info *info;
  576. struct svc_rdma_chunk_ctxt *cc;
  577. struct svc_rdma_chunk *chunk;
  578. int ret;
  579. if (pcl_is_empty(&rctxt->rc_reply_pcl))
  580. return 0;
  581. chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
  582. info = svc_rdma_write_info_alloc(rdma, chunk);
  583. if (!info)
  584. return -ENOMEM;
  585. cc = &info->wi_cc;
  586. ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
  587. svc_rdma_xb_write, info);
  588. if (ret < 0)
  589. goto out_err;
  590. trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
  591. ret = svc_rdma_post_chunk_ctxt(cc);
  592. if (ret < 0)
  593. goto out_err;
  594. return xdr->len;
  595. out_err:
  596. svc_rdma_write_info_free(info);
  597. return ret;
  598. }
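/* Only the non-payload ranges of @xdr land in the Reply chunk here:
 * byte ranges the client asked to receive via Write chunks were already
 * pushed by svc_rdma_send_write_chunk(), so pcl_process_nonpayloads()
 * appears to invoke svc_rdma_xb_write() only on the regions of @xdr
 * that rc_write_pcl does not cover.
 */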
  599. /**
  600. * svc_rdma_build_read_segment - Build RDMA Read WQEs to pull one RDMA segment
  601. * @info: context for ongoing I/O
  602. * @segment: co-ordinates of remote memory to be read
  603. *
  604. * Returns:
  605. * %0: the Read WR chain was constructed successfully
  606. * %-EINVAL: there were not enough rq_pages to finish
  607. * %-ENOMEM: allocating local resources failed
  608. * %-EIO: a DMA mapping error occurred
  609. */
  610. static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
  611. const struct svc_rdma_segment *segment)
  612. {
  613. struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
  614. struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
  615. struct svc_rqst *rqstp = info->ri_rqst;
  616. unsigned int sge_no, seg_len, len;
  617. struct svc_rdma_rw_ctxt *ctxt;
  618. struct scatterlist *sg;
  619. int ret;
  620. len = segment->rs_length;
  621. sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
  622. ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
  623. if (!ctxt)
  624. return -ENOMEM;
  625. ctxt->rw_nents = sge_no;
  626. sg = ctxt->rw_sg_table.sgl;
  627. for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
  628. seg_len = min_t(unsigned int, len,
  629. PAGE_SIZE - info->ri_pageoff);
  630. if (!info->ri_pageoff)
  631. head->rc_page_count++;
  632. sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
  633. seg_len, info->ri_pageoff);
  634. sg = sg_next(sg);
  635. info->ri_pageoff += seg_len;
  636. if (info->ri_pageoff == PAGE_SIZE) {
  637. info->ri_pageno++;
  638. info->ri_pageoff = 0;
  639. }
  640. len -= seg_len;
  641. /* Safety check */
  642. if (len &&
  643. &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
  644. goto out_overrun;
  645. }
  646. ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, segment->rs_offset,
  647. segment->rs_handle, DMA_FROM_DEVICE);
  648. if (ret < 0)
  649. return -EIO;
  650. percpu_counter_inc(&svcrdma_stat_read);
  651. list_add(&ctxt->rw_list, &cc->cc_rwctxts);
  652. cc->cc_sqecount += ret;
  653. return 0;
  654. out_overrun:
  655. trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
  656. return -EINVAL;
  657. }
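/* Example of the sge_no estimate above, assuming 4KB pages: pulling an
 * rs_length of 6000 bytes while ri_pageoff is 3000 gives
 * PAGE_ALIGN(3000 + 6000) >> PAGE_SHIFT = 3 sink pages, matching the
 * loop, which fills the last 1096 bytes of the current page, one whole
 * page, and the first 808 bytes of a third page.
 */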
  658. /**
  659. * svc_rdma_build_read_chunk - Build RDMA Read WQEs to pull one RDMA chunk
  660. * @info: context for ongoing I/O
  661. * @chunk: Read chunk to pull
  662. *
  663. * Return values:
  664. * %0: the Read WR chain was constructed successfully
  665. * %-EINVAL: there were not enough resources to finish
  666. * %-ENOMEM: allocating local resources failed
  667. * %-EIO: a DMA mapping error occurred
  668. */
  669. static int svc_rdma_build_read_chunk(struct svc_rdma_read_info *info,
  670. const struct svc_rdma_chunk *chunk)
  671. {
  672. const struct svc_rdma_segment *segment;
  673. int ret;
  674. ret = -EINVAL;
  675. pcl_for_each_segment(segment, chunk) {
  676. ret = svc_rdma_build_read_segment(info, segment);
  677. if (ret < 0)
  678. break;
  679. info->ri_totalbytes += segment->rs_length;
  680. }
  681. return ret;
  682. }
  683. /**
  684. * svc_rdma_copy_inline_range - Copy part of the inline content into pages
  685. * @info: context for RDMA Reads
  686. * @offset: offset into the Receive buffer of region to copy
  687. * @remaining: length of region to copy
  688. *
  689. * Take a page at a time from rqstp->rq_pages and copy the inline
  690. * content from the Receive buffer into that page. Update
  691. * info->ri_pageno and info->ri_pageoff so that the next RDMA Read
  692. * result will land contiguously with the copied content.
  693. *
  694. * Return values:
  695. * %0: Inline content was successfully copied
  696. * %-EINVAL: offset or length was incorrect
  697. */
  698. static int svc_rdma_copy_inline_range(struct svc_rdma_read_info *info,
  699. unsigned int offset,
  700. unsigned int remaining)
  701. {
  702. struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
  703. unsigned char *dst, *src = head->rc_recv_buf;
  704. struct svc_rqst *rqstp = info->ri_rqst;
  705. unsigned int page_no, numpages;
  706. numpages = PAGE_ALIGN(info->ri_pageoff + remaining) >> PAGE_SHIFT;
  707. for (page_no = 0; page_no < numpages; page_no++) {
  708. unsigned int page_len;
  709. page_len = min_t(unsigned int, remaining,
  710. PAGE_SIZE - info->ri_pageoff);
  711. if (!info->ri_pageoff)
  712. head->rc_page_count++;
  713. dst = page_address(rqstp->rq_pages[info->ri_pageno]);
  714. memcpy(dst + info->ri_pageoff, src + offset, page_len);
  715. info->ri_totalbytes += page_len;
  716. info->ri_pageoff += page_len;
  717. if (info->ri_pageoff == PAGE_SIZE) {
  718. info->ri_pageno++;
  719. info->ri_pageoff = 0;
  720. }
  721. remaining -= page_len;
  722. offset += page_len;
  723. }
  724. return 0;
  725. }
  726. /**
  727. * svc_rdma_read_multiple_chunks - Construct RDMA Reads to pull data item Read chunks
  728. * @info: context for RDMA Reads
  729. *
  730. * The chunk data lands in rqstp->rq_arg as a series of contiguous pages,
  731. * like an incoming TCP call.
  732. *
  733. * Return values:
  734. * %0: RDMA Read WQEs were successfully built
  735. * %-EINVAL: client provided too many chunks or segments,
  736. * %-ENOMEM: rdma_rw context pool was exhausted,
  737. * %-ENOTCONN: posting failed (connection is lost),
  738. * %-EIO: rdma_rw initialization failed (DMA mapping, etc).
  739. */
  740. static noinline int svc_rdma_read_multiple_chunks(struct svc_rdma_read_info *info)
  741. {
  742. struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
  743. const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
  744. struct xdr_buf *buf = &info->ri_rqst->rq_arg;
  745. struct svc_rdma_chunk *chunk, *next;
  746. unsigned int start, length;
  747. int ret;
  748. start = 0;
  749. chunk = pcl_first_chunk(pcl);
  750. length = chunk->ch_position;
  751. ret = svc_rdma_copy_inline_range(info, start, length);
  752. if (ret < 0)
  753. return ret;
  754. pcl_for_each_chunk(chunk, pcl) {
  755. ret = svc_rdma_build_read_chunk(info, chunk);
  756. if (ret < 0)
  757. return ret;
  758. next = pcl_next_chunk(pcl, chunk);
  759. if (!next)
  760. break;
  761. start += length;
  762. length = next->ch_position - info->ri_totalbytes;
  763. ret = svc_rdma_copy_inline_range(info, start, length);
  764. if (ret < 0)
  765. return ret;
  766. }
  767. start += length;
  768. length = head->rc_byte_len - start;
  769. ret = svc_rdma_copy_inline_range(info, start, length);
  770. if (ret < 0)
  771. return ret;
  772. buf->len += info->ri_totalbytes;
  773. buf->buflen += info->ri_totalbytes;
  774. buf->head[0].iov_base = page_address(info->ri_rqst->rq_pages[0]);
  775. buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
  776. buf->pages = &info->ri_rqst->rq_pages[1];
  777. buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;
  778. return 0;
  779. }
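/* The reassembly loop above alternates inline spans and pulled chunks.
 * Each chunk's ch_position gives the XDR offset where its payload must
 * land, so with (illustrative numbers) one 4000-byte chunk at position
 * 120: the first 120 inline bytes are copied, the chunk is pulled by
 * RDMA Read directly behind them, and the inline bytes that remain
 * (rc_byte_len - 120) are copied in after it, leaving rq_arg laid out
 * as if the whole message had arrived inline.
 */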
  780. /**
  781. * svc_rdma_read_data_item - Construct RDMA Reads to pull data item Read chunks
  782. * @info: context for RDMA Reads
  783. *
  784. * The chunk data lands in the page list of rqstp->rq_arg.pages.
  785. *
  786. * Currently NFSD does not look at the rqstp->rq_arg.tail[0] kvec.
  787. * Therefore, XDR round-up of the Read chunk and trailing
  788. * inline content must both be added at the end of the pagelist.
  789. *
  790. * Return values:
  791. * %0: RDMA Read WQEs were successfully built
  792. * %-EINVAL: client provided too many chunks or segments,
  793. * %-ENOMEM: rdma_rw context pool was exhausted,
  794. * %-ENOTCONN: posting failed (connection is lost),
  795. * %-EIO: rdma_rw initialization failed (DMA mapping, etc).
  796. */
  797. static int svc_rdma_read_data_item(struct svc_rdma_read_info *info)
  798. {
  799. struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
  800. struct xdr_buf *buf = &info->ri_rqst->rq_arg;
  801. struct svc_rdma_chunk *chunk;
  802. unsigned int length;
  803. int ret;
  804. chunk = pcl_first_chunk(&head->rc_read_pcl);
  805. ret = svc_rdma_build_read_chunk(info, chunk);
  806. if (ret < 0)
  807. goto out;
  808. /* Split the Receive buffer between the head and tail
  809. * buffers at Read chunk's position. XDR roundup of the
  810. * chunk is not included in either the pagelist or in
  811. * the tail.
  812. */
  813. buf->tail[0].iov_base = buf->head[0].iov_base + chunk->ch_position;
  814. buf->tail[0].iov_len = buf->head[0].iov_len - chunk->ch_position;
  815. buf->head[0].iov_len = chunk->ch_position;
  816. /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
  817. *
  818. * If the client already rounded up the chunk length, the
  819. * length does not change. Otherwise, the length of the page
  820. * list is increased to include XDR round-up.
  821. *
  822. * Currently these chunks always start at page offset 0,
  823. * thus the rounded-up length never crosses a page boundary.
  824. */
  825. buf->pages = &info->ri_rqst->rq_pages[0];
  826. length = xdr_align_size(chunk->ch_length);
  827. buf->page_len = length;
  828. buf->len += length;
  829. buf->buflen += length;
  830. out:
  831. return ret;
  832. }
  833. /**
  834. * svc_rdma_read_chunk_range - Build RDMA Read WQEs for portion of a chunk
  835. * @info: context for RDMA Reads
  836. * @chunk: parsed Call chunk to pull
  837. * @offset: offset of region to pull
  838. * @length: length of region to pull
  839. *
  840. * Return values:
  841. * %0: RDMA Read WQEs were successfully built
  842. * %-EINVAL: there were not enough resources to finish
  843. * %-ENOMEM: rdma_rw context pool was exhausted,
  844. * %-ENOTCONN: posting failed (connection is lost),
  845. * %-EIO: rdma_rw initialization failed (DMA mapping, etc).
  846. */
  847. static int svc_rdma_read_chunk_range(struct svc_rdma_read_info *info,
  848. const struct svc_rdma_chunk *chunk,
  849. unsigned int offset, unsigned int length)
  850. {
  851. const struct svc_rdma_segment *segment;
  852. int ret;
  853. ret = -EINVAL;
  854. pcl_for_each_segment(segment, chunk) {
  855. struct svc_rdma_segment dummy;
  856. if (offset > segment->rs_length) {
  857. offset -= segment->rs_length;
  858. continue;
  859. }
  860. dummy.rs_handle = segment->rs_handle;
  861. dummy.rs_length = min_t(u32, length, segment->rs_length) - offset;
  862. dummy.rs_offset = segment->rs_offset + offset;
  863. ret = svc_rdma_build_read_segment(info, &dummy);
  864. if (ret < 0)
  865. break;
  866. info->ri_totalbytes += dummy.rs_length;
  867. length -= dummy.rs_length;
  868. offset = 0;
  869. }
  870. return ret;
  871. }
  872. /**
  873. * svc_rdma_read_call_chunk - Build RDMA Read WQEs to pull a Long Message
  874. * @info: context for RDMA Reads
  875. *
  876. * Return values:
  877. * %0: RDMA Read WQEs were successfully built
  878. * %-EINVAL: there were not enough resources to finish
  879. * %-ENOMEM: rdma_rw context pool was exhausted,
  880. * %-ENOTCONN: posting failed (connection is lost),
  881. * %-EIO: rdma_rw initialization failed (DMA mapping, etc).
  882. */
  883. static int svc_rdma_read_call_chunk(struct svc_rdma_read_info *info)
  884. {
  885. struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
  886. const struct svc_rdma_chunk *call_chunk =
  887. pcl_first_chunk(&head->rc_call_pcl);
  888. const struct svc_rdma_pcl *pcl = &head->rc_read_pcl;
  889. struct svc_rdma_chunk *chunk, *next;
  890. unsigned int start, length;
  891. int ret;
  892. if (pcl_is_empty(pcl))
  893. return svc_rdma_build_read_chunk(info, call_chunk);
  894. start = 0;
  895. chunk = pcl_first_chunk(pcl);
  896. length = chunk->ch_position;
  897. ret = svc_rdma_read_chunk_range(info, call_chunk, start, length);
  898. if (ret < 0)
  899. return ret;
  900. pcl_for_each_chunk(chunk, pcl) {
  901. ret = svc_rdma_build_read_chunk(info, chunk);
  902. if (ret < 0)
  903. return ret;
  904. next = pcl_next_chunk(pcl, chunk);
  905. if (!next)
  906. break;
  907. start += length;
  908. length = next->ch_position - info->ri_totalbytes;
  909. ret = svc_rdma_read_chunk_range(info, call_chunk,
  910. start, length);
  911. if (ret < 0)
  912. return ret;
  913. }
  914. start += length;
  915. length = call_chunk->ch_length - start;
  916. return svc_rdma_read_chunk_range(info, call_chunk, start, length);
  917. }
  918. /**
  919. * svc_rdma_read_special - Build RDMA Read WQEs to pull a Long Message
  920. * @info: context for RDMA Reads
  921. *
  922. * The start of the data lands in the first page just after the
  923. * Transport header, and the rest lands in rqstp->rq_arg.pages.
  924. *
  925. * Assumptions:
  926. * - A PZRC is never sent in an RDMA_MSG message, though it's
  927. * allowed by spec.
  928. *
  929. * Return values:
  930. * %0: RDMA Read WQEs were successfully built
  931. * %-EINVAL: client provided too many chunks or segments,
  932. * %-ENOMEM: rdma_rw context pool was exhausted,
  933. * %-ENOTCONN: posting failed (connection is lost),
  934. * %-EIO: rdma_rw initialization failed (DMA mapping, etc).
  935. */
  936. static noinline int svc_rdma_read_special(struct svc_rdma_read_info *info)
  937. {
  938. struct xdr_buf *buf = &info->ri_rqst->rq_arg;
  939. int ret;
  940. ret = svc_rdma_read_call_chunk(info);
  941. if (ret < 0)
  942. goto out;
  943. buf->len += info->ri_totalbytes;
  944. buf->buflen += info->ri_totalbytes;
  945. buf->head[0].iov_base = page_address(info->ri_rqst->rq_pages[0]);
  946. buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, info->ri_totalbytes);
  947. buf->pages = &info->ri_rqst->rq_pages[1];
  948. buf->page_len = info->ri_totalbytes - buf->head[0].iov_len;
  949. out:
  950. return ret;
  951. }
  952. /**
  953. * svc_rdma_process_read_list - Pull list of Read chunks from the client
  954. * @rdma: controlling RDMA transport
  955. * @rqstp: set of pages to use as Read sink buffers
  956. * @head: pages under I/O collect here
  957. *
  958. * The RPC/RDMA protocol assumes that the upper layer's XDR decoders
  959. * pull each Read chunk as they decode an incoming RPC message.
  960. *
  961. * On Linux, however, the server needs to have a fully-constructed RPC
  962. * message in rqstp->rq_arg when there is a positive return code from
  963. * ->xpo_recvfrom. So the Read list is safety-checked immediately when
  964. * it is received, then here the whole Read list is pulled all at once.
  965. * The ingress RPC message is fully reconstructed once all associated
  966. * RDMA Reads have completed.
  967. *
  968. * Return values:
  969. * %1: all needed RDMA Reads were posted successfully,
  970. * %-EINVAL: client provided too many chunks or segments,
  971. * %-ENOMEM: rdma_rw context pool was exhausted,
  972. * %-ENOTCONN: posting failed (connection is lost),
  973. * %-EIO: rdma_rw initialization failed (DMA mapping, etc).
  974. */
  975. int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
  976. struct svc_rqst *rqstp,
  977. struct svc_rdma_recv_ctxt *head)
  978. {
  979. struct svc_rdma_read_info *info;
  980. struct svc_rdma_chunk_ctxt *cc;
  981. int ret;
  982. info = svc_rdma_read_info_alloc(rdma);
  983. if (!info)
  984. return -ENOMEM;
  985. cc = &info->ri_cc;
  986. info->ri_rqst = rqstp;
  987. info->ri_readctxt = head;
  988. info->ri_pageno = 0;
  989. info->ri_pageoff = 0;
  990. info->ri_totalbytes = 0;
  991. if (pcl_is_empty(&head->rc_call_pcl)) {
  992. if (head->rc_read_pcl.cl_count == 1)
  993. ret = svc_rdma_read_data_item(info);
  994. else
  995. ret = svc_rdma_read_multiple_chunks(info);
  996. } else
  997. ret = svc_rdma_read_special(info);
  998. if (ret < 0)
  999. goto out_err;
  1000. trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
  1001. init_completion(&cc->cc_done);
  1002. ret = svc_rdma_post_chunk_ctxt(cc);
  1003. if (ret < 0)
  1004. goto out_err;
  1005. ret = 1;
  1006. wait_for_completion(&cc->cc_done);
  1007. if (cc->cc_status != IB_WC_SUCCESS)
  1008. ret = -EIO;
  1009. /* rq_respages starts after the last arg page */
  1010. rqstp->rq_respages = &rqstp->rq_pages[head->rc_page_count];
  1011. rqstp->rq_next_page = rqstp->rq_respages + 1;
  1012. /* Ensure svc_rdma_recv_ctxt_put() does not try to release pages */
  1013. head->rc_page_count = 0;
  1014. out_err:
  1015. svc_rdma_read_info_free(info);
  1016. return ret;
  1017. }
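/* Unlike the Write side, Read completion is waited for synchronously
 * above: ->xpo_recvfrom cannot return until rq_arg is complete, so the
 * thread sleeps on cc_done and then hands the sink pages over to the
 * rqstp (rq_respages is advanced past them, and rc_page_count is
 * cleared so svc_rdma_recv_ctxt_put() does not release pages the rqstp
 * now owns).
 */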