xprtrdma: Clean up sendctx functions
Minor clean-ups I've stumbled on since sendctx was merged last year. In
particular, making Send completion processing more efficient appears to
have a measurable impact on IOPS throughput.

Note: test_and_clear_bit() returns a value, thus an explicit memory
barrier is not necessary.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

Committed by: Anna Schumaker
Parent: 17e4c443c0
Commit: dbcc53a52d
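
The note about test_and_clear_bit() rests on a general kernel ordering rule: atomic read-modify-write operations that return a value are fully ordered, so no explicit smp_mb__after_atomic() is needed after them. A minimal sketch of that pattern follows; the flag, wait queue, and function names are hypothetical, not taken from the xprtrdma code:

/* Illustrative sketch only: DEMO_WAITER_BIT, demo_flags, demo_wq and
 * demo_send_completion() are hypothetical names, not part of this patch.
 */
#include <linux/bitops.h>
#include <linux/wait.h>

#define DEMO_WAITER_BIT	0		/* bit number within demo_flags */

static unsigned long demo_flags;
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void demo_send_completion(void)
{
	/* test_and_clear_bit() is a value-returning atomic RMW, so it
	 * already acts as a full memory barrier; an explicit
	 * smp_mb__after_atomic() here would be redundant.
	 */
	if (test_and_clear_bit(DEMO_WAITER_BIT, &demo_flags))
		wake_up(&demo_wq);
}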
@@ -225,6 +225,7 @@ struct rpcrdma_xprt;
 struct rpcrdma_sendctx {
 	struct ib_send_wr	sc_wr;
 	struct ib_cqe		sc_cqe;
+	struct ib_device	*sc_device;
 	struct rpcrdma_xprt	*sc_xprt;
 	struct rpcrdma_req	*sc_req;
 	unsigned int		sc_unmap_count;
@@ -536,7 +537,7 @@ struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
 void rpcrdma_req_destroy(struct rpcrdma_req *req);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
-struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
+struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);
 
 struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
 void rpcrdma_mr_put(struct rpcrdma_mr *mr);
@@ -625,7 +626,7 @@ int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
 			      struct rpcrdma_req *req, u32 hdrlen,
 			      struct xdr_buf *xdr,
 			      enum rpcrdma_chunktype rtype);
-void rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc);
+void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
 int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
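
As a usage note on the two prototype changes above, here is a hedged sketch of how call sites adapt. The surrounding functions are condensed and the demo_prepare()/demo_complete() names are hypothetical; only the two rpcrdma_* calls and their new signatures come from this patch:

/* Hypothetical callers; only the rpcrdma_* signatures reflect the patch. */
static int demo_prepare(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Was rpcrdma_sendctx_get_locked(&r_xprt->rx_buf); the helper now
	 * takes the transport itself.
	 */
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		return -ENOBUFS;
	return 0;
}

static void demo_complete(struct rpcrdma_sendctx *sc)
{
	/* Was rpcrdma_unmap_sendctx(sc); the rename matches the
	 * rpcrdma_sendctx_ prefix used by the other helpers declared above.
	 */
	rpcrdma_sendctx_unmap(sc);
}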