SUNRPC: More fixes for backlog congestion

commit e86be3a04bc4aeaf12f93af35f08f8d4385bcd98 upstream.

Ensure that we fix the XPRT_CONGESTED starvation issue for RDMA as well
as socket-based transports.
Ensure we always initialise the request after waking up from the backlog
list.

Fixes: e877a88d1f06 ("SUNRPC in case of backlog, hand free slots directly to waiting task")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
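
The second fix relies on the action callback argument to rpc_sleep_on(): a callback installed when the task goes to sleep runs in the woken task's own context, before its state machine resumes, which is what lets xprt_complete_request_init() (added below) re-run xprt_request_init() on a slot handed over while the task slept. A simplified sketch of that dispatch order, modelled on __rpc_execute() in net/sunrpc/sched.c (locking and sleeping omitted):

	/* Simplified sketch of the RPC scheduler's dispatch order; the real
	 * loop is __rpc_execute() in net/sunrpc/sched.c. */
	static void rpc_execute_sketch(struct rpc_task *task)
	{
		for (;;) {
			void (*do_action)(struct rpc_task *);

			/* A callback installed by rpc_sleep_on() (tk_callback)
			 * takes priority over the normal state-machine step
			 * (tk_action), so xprt_complete_request_init() runs
			 * before the woken task touches its rpc_rqst again. */
			do_action = task->tk_action;
			if (task->tk_callback) {
				do_action = task->tk_callback;
				task->tk_callback = NULL;
			}
			if (!do_action)
				break;
			do_action(task);
			/* ...the real loop then sleeps until the next wake-up
			 * whenever the task has queued itself again. */
		}
	}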
Authored by Trond Myklebust
2021-05-25 18:43:38 -04:00
Committed by Greg Kroah-Hartman
Parent commit: c2779f8ddc
This commit: 899b5131e7
5 files changed, 52 insertions(+), 39 deletions(-)

net/sunrpc/xprt.c

@@ -1575,11 +1575,18 @@ xprt_transmit(struct rpc_task *task)
 	spin_unlock(&xprt->queue_lock);
 }
 
-static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+static void xprt_complete_request_init(struct rpc_task *task)
+{
+	if (task->tk_rqstp)
+		xprt_request_init(task);
+}
+
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	set_bit(XPRT_CONGESTED, &xprt->state);
-	rpc_sleep_on(&xprt->backlog, task, NULL);
+	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
 }
+EXPORT_SYMBOL_GPL(xprt_add_backlog);
 
 static bool __xprt_set_rq(struct rpc_task *task, void *data)
 {
@@ -1587,14 +1594,13 @@ static bool __xprt_set_rq(struct rpc_task *task, void *data)
 
 	if (task->tk_rqstp == NULL) {
 		memset(req, 0, sizeof(*req));	/* mark unused */
-		task->tk_status = -EAGAIN;
 		task->tk_rqstp = req;
 		return true;
 	}
 	return false;
 }
 
-static bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
 	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
 		clear_bit(XPRT_CONGESTED, &xprt->state);
@@ -1602,6 +1608,7 @@ static bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
 	}
 	return true;
 }
+EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
 
 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
 {
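
The two EXPORT_SYMBOL_GPL() lines above are the hook for the RDMA side of the fix: the xprtrdma hunks of this commit (not shown in this file) move that transport onto the same backlog machinery. A transport slot allocator built on these helpers would look roughly like the following sketch; my_alloc_slot, my_free_slot, my_get_req and my_put_req are illustrative names, not the actual xprtrdma code:

	/* Hypothetical transport methods built on the newly exported helpers. */
	static void my_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
	{
		struct rpc_rqst *req = my_get_req(xprt);	/* transport-private pool */

		if (!req) {
			/* No free slot: park the task on the backlog.  The
			 * xprt_complete_request_init() callback re-runs
			 * xprt_request_init() when __xprt_set_rq() hands the
			 * task a recycled slot and wakes it. */
			task->tk_status = -EAGAIN;
			xprt_add_backlog(xprt, task);
			return;
		}
		task->tk_rqstp = req;
		task->tk_status = 0;
	}

	static void my_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
	{
		/* Hand the slot directly to a backlogged task if one is
		 * waiting; only return it to the pool when nobody took it. */
		if (!xprt_wake_up_backlog(xprt, req))
			my_put_req(xprt, req);
	}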
@@ -1611,7 +1618,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
 		goto out;
 	spin_lock(&xprt->reserve_lock);
 	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
-		rpc_sleep_on(&xprt->backlog, task, NULL);
+		xprt_add_backlog(xprt, task);
 		ret = true;
 	}
 	spin_unlock(&xprt->reserve_lock);
@@ -1780,10 +1787,6 @@ xprt_request_init(struct rpc_task *task)
 	struct rpc_xprt *xprt = task->tk_xprt;
 	struct rpc_rqst	*req = task->tk_rqstp;
 
-	if (req->rq_task)
-		/* Already initialized */
-		return;
-
 	req->rq_task	= task;
 	req->rq_xprt	= xprt;
 	req->rq_buffer	= NULL;
@@ -1844,10 +1847,8 @@ void xprt_retry_reserve(struct rpc_task *task)
 	struct rpc_xprt *xprt = task->tk_xprt;
 
 	task->tk_status = 0;
-	if (task->tk_rqstp != NULL) {
-		xprt_request_init(task);
+	if (task->tk_rqstp != NULL)
 		return;
-	}
 
 	task->tk_status = -EAGAIN;
 	xprt_do_reserve(xprt, task);
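
Assembled from the hunk above, the post-patch xprt_retry_reserve() reads as follows; a task woken from the backlog already owns a slot that xprt_complete_request_init() initialised, so the explicit xprt_request_init() call could be dropped:

	void xprt_retry_reserve(struct rpc_task *task)
	{
		struct rpc_xprt *xprt = task->tk_xprt;

		task->tk_status = 0;
		if (task->tk_rqstp != NULL)
			return;

		task->tk_status = -EAGAIN;
		xprt_do_reserve(xprt, task);
	}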
@@ -1872,24 +1873,21 @@ void xprt_release(struct rpc_task *task)
 	}
 
 	xprt = req->rq_xprt;
-	if (xprt) {
-		xprt_request_dequeue_xprt(task);
-		spin_lock(&xprt->transport_lock);
-		xprt->ops->release_xprt(xprt, task);
-		if (xprt->ops->release_request)
-			xprt->ops->release_request(task);
-		xprt_schedule_autodisconnect(xprt);
-		spin_unlock(&xprt->transport_lock);
-		if (req->rq_buffer)
-			xprt->ops->buf_free(task);
-		xdr_free_bvec(&req->rq_rcv_buf);
-		xdr_free_bvec(&req->rq_snd_buf);
-		if (req->rq_cred != NULL)
-			put_rpccred(req->rq_cred);
-		if (req->rq_release_snd_buf)
-			req->rq_release_snd_buf(req);
-	} else
-		xprt = task->tk_xprt;
+	xprt_request_dequeue_xprt(task);
+	spin_lock(&xprt->transport_lock);
+	xprt->ops->release_xprt(xprt, task);
+	if (xprt->ops->release_request)
+		xprt->ops->release_request(task);
+	xprt_schedule_autodisconnect(xprt);
+	spin_unlock(&xprt->transport_lock);
+	if (req->rq_buffer)
+		xprt->ops->buf_free(task);
+	xdr_free_bvec(&req->rq_rcv_buf);
+	xdr_free_bvec(&req->rq_snd_buf);
+	if (req->rq_cred != NULL)
+		put_rpccred(req->rq_cred);
+	if (req->rq_release_snd_buf)
+		req->rq_release_snd_buf(req);
 
 	task->tk_rqstp = NULL;
 	if (likely(!bc_prealloc(req)))