/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>			/* wait_queue_head_t, etc */
#include <linux/spinlock.h>		/* spinlock_t, etc */
#include <linux/atomic.h>		/* atomic_t, etc */
#include <linux/kref.h>			/* struct kref */
#include <linux/workqueue.h>		/* struct work_struct */
#include <linux/llist.h>

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h>		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma_cid.h>	/* completion IDs */
#include <linux/sunrpc/rpc_rdma.h>	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h>	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

/*
 * RDMA Endpoint -- connection endpoint details
 */
struct rpcrdma_mr;
struct rpcrdma_ep {
	struct kref re_kref;
	struct rdma_cm_id *re_id;
	struct ib_pd *re_pd;
	unsigned int re_max_rdma_segs;
	unsigned int re_max_fr_depth;
	struct rpcrdma_mr *re_write_pad_mr;
	enum ib_mr_type re_mrtype;
	struct completion re_done;
	unsigned int re_send_count;
	unsigned int re_send_batch;
	unsigned int re_max_inline_send;
	unsigned int re_max_inline_recv;
	int re_async_rc;
	int re_connect_status;
	atomic_t re_receiving;
	atomic_t re_force_disconnect;
	struct ib_qp_init_attr re_attr;
	wait_queue_head_t re_connect_wait;
	struct rpc_xprt *re_xprt;
	struct rpcrdma_connect_private
			re_cm_private;
	struct rdma_conn_param re_remote_cma;
	int re_receive_count;
	unsigned int re_max_requests;	/* depends on device */
	unsigned int re_inline_send;	/* negotiated */
	unsigned int re_inline_recv;	/* negotiated */

	atomic_t re_completion_ids;

	char re_write_pad[XDR_UNIT];
};

/* Pre-allocate extra Work Requests for handling reverse-direction
 * Receives and Sends. This is a fixed value because the Work Queues
 * are allocated when the forward channel is set up, long before the
 * backchannel is provisioned. This value is two times
 * NFS4_DEF_CB_SLOT_TABLE_SIZE.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS (32)
#else
#define RPCRDMA_BACKWARD_WRS (0)
#endif
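
/* Back-of-envelope check (assuming a default backchannel slot table
 * size of 16; NFS4_DEF_CB_SLOT_TABLE_SIZE is not defined in this
 * file): 2 * 16 = 32, matching the value selected above.
 */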

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 */
struct rpcrdma_regbuf {
	struct ib_sge rg_iov;
	struct ib_device *rg_device;
	enum dma_data_direction rg_direction;
	void *rg_data;
};

static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device;
}

static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
{
	return rb->rg_data;
}
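
/* Illustrative sketch only: rpcrdma_example_fill_sge() is a
 * hypothetical helper, not part of this header's real interface. It
 * shows how the accessors above would typically be used to describe a
 * regbuf to the RDMA provider as a single scatter/gather element.
 */
static inline void rpcrdma_example_fill_sge(struct rpcrdma_regbuf *rb,
					    struct ib_sge *sge)
{
	sge->addr = rdmab_addr(rb);	/* DMA address of the buffer */
	sge->length = rdmab_length(rb);	/* buffer size in bytes */
	sge->lkey = rdmab_lkey(rb);	/* local protection key */
}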

/* Do not use emergency memory reserves, and fail quickly if memory
 * cannot be allocated easily. These flags may be used wherever there
 * is robust logic to handle a failure to allocate.
 */
#define XPRTRDMA_GFP_FLAGS (__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 16. This prevents less-capable devices from
 * overrunning the Send buffer while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 16 read segments means the
 * chunk lists cannot consume more than
 *
 *	((16 + 2) * read segment size) + 1 XDR words,
 *
 * or about 400 bytes. The fixed part of the header is
 * another 24 bytes. Thus when the inline threshold is
 * 1024 bytes, at least 600 bytes are available for RPC
 * message bodies.
 */
enum {
	RPCRDMA_MAX_HDR_SEGS = 16,
};
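
/* Back-of-envelope illustration of the bound above, assuming a read
 * segment of 5 XDR words (position, handle, length, and a 64-bit
 * offset, i.e. 20 bytes); the exact on-the-wire size is defined by
 * the RPC/RDMA protocol, not by this file:
 *
 *	(16 + 2) * 20 + 4 = 364 bytes of chunk lists
 *	1024 - 364 - 24   = 636 bytes left for the RPC message body
 *
 * which is consistent with the "about 400" and "at least 600"
 * estimates given above.
 */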

/*
 * struct rpcrdma_rep -- this structure encapsulates state required
 * to receive and complete an RPC Reply, asynchronously. It needs
 * several pieces of state:
 *
 *   o receive buffer and ib_sge (donated to provider)
 *   o status of receive (success or not, length, inv rkey)
 *   o bookkeeping state to get run by reply handler (XDR stream)
 *
 * These structures are allocated during transport initialization.
 * N of these are associated with a transport instance, managed by
 * struct rpcrdma_buffer. N is the max number of outstanding RPCs.
 */
struct rpcrdma_rep {
	struct ib_cqe rr_cqe;
	struct rpc_rdma_cid rr_cid;

	__be32 rr_xid;
	__be32 rr_vers;
	__be32 rr_proc;
	int rr_wc_flags;
	u32 rr_inv_rkey;
	bool rr_temp;
	struct rpcrdma_regbuf *rr_rdmabuf;
	struct rpcrdma_xprt *rr_rxprt;
	struct rpc_rqst *rr_rqst;
	struct xdr_buf rr_hdrbuf;
	struct xdr_stream rr_stream;
	struct llist_node rr_node;
	struct ib_recv_wr rr_recv_wr;
	struct list_head rr_all;
};

/* To reduce the rate at which a transport invokes ib_post_recv
 * (and thus the hardware doorbell rate), xprtrdma posts Receive
 * WRs in batches.
 *
 * Setting this to zero disables Receive post batching.
 */
enum {
	RPCRDMA_MAX_RECV_BATCH = 7,
};

/* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes
 */
struct rpcrdma_req;
struct rpcrdma_sendctx {
	struct ib_cqe sc_cqe;
	struct rpc_rdma_cid sc_cid;
	struct rpcrdma_req *sc_req;
	unsigned int sc_unmap_count;
	struct ib_sge sc_sges[];
};

/*
 * struct rpcrdma_mr - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 */
struct rpcrdma_req;
struct rpcrdma_mr {
	struct list_head mr_list;
	struct rpcrdma_req *mr_req;
	struct ib_mr *mr_ibmr;
	struct ib_device *mr_device;
	struct scatterlist *mr_sg;
	int mr_nents;
	enum dma_data_direction mr_dir;
	struct ib_cqe mr_cqe;
	struct completion mr_linv_done;
	union {
		struct ib_reg_wr mr_regwr;
		struct ib_send_wr mr_invwr;
	};
	struct rpcrdma_xprt *mr_xprt;
	u32 mr_handle;
	u32 mr_length;
	u64 mr_offset;
	struct list_head mr_all;
	struct rpc_rdma_cid mr_cid;
};

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 */

/* Maximum number of page-sized "segments" per chunk list to be
 * registered or invalidated. Must handle a Reply chunk:
 */
enum {
	RPCRDMA_MAX_IOV_SEGS = 3,
	RPCRDMA_MAX_DATA_SEGS = ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS = RPCRDMA_MAX_DATA_SEGS + RPCRDMA_MAX_IOV_SEGS,
};
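
/* Worked example of the limits above, assuming 4 KiB pages (PAGE_SIZE
 * is architecture-dependent, so these numbers are illustrative only):
 *
 *	RPCRDMA_MAX_DATA_SEGS = (1 MiB / 4 KiB) + 1 = 257
 *	RPCRDMA_MAX_SEGS      = 257 + 3             = 260
 */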

/* Arguments for DMA mapping and registration */
struct rpcrdma_mr_seg {
	u32 mr_len;		/* length of segment */
	struct page *mr_page;	/* underlying struct page */
	u64 mr_offset;		/* IN: page offset, OUT: iova */
};

/* The Send SGE array is provisioned to send a maximum size
 * inline request:
 * - RPC-over-RDMA header
 * - xdr_buf head iovec
 * - RPCRDMA_MAX_INLINE bytes, in pages
 * - xdr_buf tail iovec
 *
 * The actual number of array elements consumed by each RPC
 * depends on the device's max_sge limit.
 */
enum {
	RPCRDMA_MIN_SEND_SGES = 3,
	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
};
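
/* Illustrative arithmetic only: if RPCRDMA_MAX_INLINE were 64 KiB and
 * PAGE_SHIFT were 12 (4 KiB pages) -- both values are assumptions, as
 * neither constant is defined in this file -- then:
 *
 *	RPCRDMA_MAX_PAGE_SGES = 65536 >> 12    = 16
 *	RPCRDMA_MAX_SEND_SGES = 1 + 1 + 16 + 1 = 19
 */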

struct rpcrdma_buffer;
struct rpcrdma_req {
	struct list_head rl_list;
	struct rpc_rqst rl_slot;
	struct rpcrdma_rep *rl_reply;
	struct xdr_stream rl_stream;
	struct xdr_buf rl_hdrbuf;
	struct ib_send_wr rl_wr;
	struct rpcrdma_sendctx *rl_sendctx;
	struct rpcrdma_regbuf *rl_rdmabuf;	/* xprt header */
	struct rpcrdma_regbuf *rl_sendbuf;	/* rq_snd_buf */
	struct rpcrdma_regbuf *rl_recvbuf;	/* rq_rcv_buf */

	struct list_head rl_all;
	struct kref rl_kref;

	struct list_head rl_free_mrs;
	struct list_head rl_registered;
	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
};

static inline struct rpcrdma_req *
rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
	return container_of(rqst, struct rpcrdma_req, rl_slot);
}

static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
	list_add(&mr->mr_list, list);
}

static inline struct rpcrdma_mr *
rpcrdma_mr_pop(struct list_head *list)
{
	struct rpcrdma_mr *mr;

	mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);
	if (mr)
		list_del_init(&mr->mr_list);
	return mr;
}
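
/* Illustrative sketch only: rpcrdma_example_move_mrs() is a
 * hypothetical helper, not part of this header's real interface. It
 * shows the intended pairing of the helpers above: pop detaches the
 * first MR from one list, and push prepends it to another.
 */
static inline void rpcrdma_example_move_mrs(struct list_head *src,
					    struct list_head *dst)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(src)) != NULL)
		rpcrdma_mr_push(mr, dst);
}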

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t rb_lock;
	struct list_head rb_send_bufs;
	struct list_head rb_mrs;

	unsigned long rb_sc_head;
	unsigned long rb_sc_tail;
	unsigned long rb_sc_last;
	struct rpcrdma_sendctx **rb_sc_ctxs;

	struct list_head rb_allreqs;
	struct list_head rb_all_mrs;
	struct list_head rb_all_reps;

	struct llist_head rb_free_reps;

	__be32 rb_max_requests;
	u32 rb_credits;		/* most recent credit grant */

	u32 rb_bc_srv_max_requests;
	u32 rb_bc_max_requests;

	struct work_struct rb_refresh_worker;
};

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	/* accessed when sending a call */
	unsigned long read_chunk_count;
	unsigned long write_chunk_count;
	unsigned long reply_chunk_count;
	unsigned long long total_rdma_request;

	/* rarely accessed error counters */
	unsigned long long pullup_copy_count;
	unsigned long hardway_register_count;
	unsigned long failed_marshal_count;
	unsigned long bad_reply_count;
	unsigned long mrs_recycled;
	unsigned long mrs_orphaned;
	unsigned long mrs_allocated;
	unsigned long empty_sendctx_q;

	/* accessed when receiving a reply */
	unsigned long long total_rdma_reply;
	unsigned long long fixup_copy_count;
	unsigned long reply_waits_for_send;
	unsigned long local_inv_needed;
	unsigned long nomsg_call_count;
	unsigned long bcall_count;
};

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt rx_xprt;
	struct rpcrdma_ep *rx_ep;
	struct rpcrdma_buffer rx_buf;
	struct delayed_work rx_connect_worker;
	struct rpc_timeout rx_timeout;
	struct rpcrdma_stats rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)

static inline const char *
rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
{
	return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR];
}

static inline const char *
rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt)
{
	return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT];
}

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/* This setting controls the hunt for a supported memory
 * registration strategy.
 */
extern unsigned int xprt_rdma_memreg_strategy;

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
void rpcrdma_force_disconnect(struct rpcrdma_ep *ep);
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);

void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
				       size_t size);
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void rpcrdma_req_destroy(struct rpcrdma_req *req);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);

struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt);

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
			struct rpcrdma_req *req);
void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);

bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
			    gfp_t flags);
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb);

/**
 * rpcrdma_regbuf_is_mapped - check if buffer is DMA mapped
 * @rb: regbuf to check
 *
 * Returns true if the buffer is now mapped to rb->rg_device.
 */
static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device != NULL;
}

/**
 * rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is currently DMA mapped.
 */
static inline bool rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
					  struct rpcrdma_regbuf *rb)
{
	if (likely(rpcrdma_regbuf_is_mapped(rb)))
		return true;
	return __rpcrdma_regbuf_dma_map(r_xprt, rb);
}
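
/* Illustrative sketch only: rpcrdma_example_map_regbuf() is a
 * hypothetical caller, not part of this header's real interface. It
 * shows the lazy-mapping pattern suggested above: the map call is a
 * no-op when the regbuf is already mapped, and the caller bails out
 * if the mapping attempt fails.
 */
static inline int rpcrdma_example_map_regbuf(struct rpcrdma_xprt *r_xprt,
					     struct rpcrdma_regbuf *rb)
{
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		return -EIO;	/* hypothetical error choice */
	return 0;
}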

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */
static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

/* Memory registration calls xprtrdma/frwr_ops.c
 */
void frwr_reset(struct rpcrdma_req *req);
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device);
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr);
void frwr_mr_release(struct rpcrdma_mr *mr);
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr);
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
int frwr_wp_create(struct rpcrdma_xprt *r_xprt);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_noch_pullup,
	rpcrdma_noch_mapped,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_req *req, u32 hdrlen,
			      struct xdr_buf *xdr,
			      enum rpcrdma_chunktype rtype);
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep);
void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt);
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
void rpcrdma_unpin_rqst(struct rpcrdma_rep *rep);
void rpcrdma_reply_handler(struct rpcrdma_rep *rep);

static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
{
	xdr->head[0].iov_len = len;
	xdr->len = len;
}

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
extern unsigned int xprt_rdma_max_inline_write;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void xprt_rdma_close(struct rpc_xprt *xprt);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */