Merge tag 'nfsd-4.15' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
 "Lots of good bugfixes, including:

   - fix a number of races in the NFSv4+ state code

   - fix some shutdown crashes in multiple-network-namespace cases

   - relax our 4.1 session limits; if you've an artificially low limit
     to the number of 4.1 clients that can mount simultaneously, try
     upgrading"

* tag 'nfsd-4.15' of git://linux-nfs.org/~bfields/linux: (22 commits)
  SUNRPC: Improve ordering of transport processing
  nfsd: deal with revoked delegations appropriately
  svcrdma: Enqueue after setting XPT_CLOSE in completion handlers
  nfsd: use nfs->ns.inum as net ID
  rpc: remove some BUG()s
  svcrdma: Preserve CB send buffer across retransmits
  nfds: avoid gettimeofday for nfssvc_boot time
  fs, nfsd: convert nfs4_file.fi_ref from atomic_t to refcount_t
  fs, nfsd: convert nfs4_cntl_odstate.co_odcount from atomic_t to refcount_t
  fs, nfsd: convert nfs4_stid.sc_count from atomic_t to refcount_t
  lockd: double unregister of inetaddr notifiers
  nfsd4: catch some false session retries
  nfsd4: fix cached replies to solo SEQUENCE compounds
  sunrcp: make function _svc_create_xprt static
  SUNRPC: Fix tracepoint storage issues with svc_recv and svc_rqst_status
  nfsd: use ARRAY_SIZE
  nfsd: give out fewer session slots as limit approaches
  nfsd: increase DRC cache limit
  nfsd: remove unnecessary nofilehandle checks
  nfs_common: convert int to bool
  ...
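Several of the commits listed above (the nfs4_file.fi_ref, co_odcount and nfs4_stid.sc_count changes) are mechanical atomic_t-to-refcount_t conversions; refcount_t saturates and warns on overflow/underflow where atomic_t would silently wrap. Those hunks are not shown below, so here is a minimal sketch of what such a conversion looks like, using made-up struct and helper names rather than the real nfsd ones:

#include <linux/refcount.h>
#include <linux/slab.h>

/* Illustrative only: the general shape of an atomic_t -> refcount_t conversion. */
struct example_obj {
        refcount_t ref;                         /* was: atomic_t ref; */
};

static struct example_obj *example_obj_alloc(void)
{
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                refcount_set(&obj->ref, 1);     /* was: atomic_set(&obj->ref, 1) */
        return obj;
}

static void example_obj_get(struct example_obj *obj)
{
        refcount_inc(&obj->ref);                /* was: atomic_inc(&obj->ref) */
}

static void example_obj_put(struct example_obj *obj)
{
        /* was: if (atomic_dec_and_test(&obj->ref)) kfree(obj); */
        if (refcount_dec_and_test(&obj->ref))
                kfree(obj);
}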
@@ -855,11 +855,13 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
                 return stat;
         if (integ_len > buf->len)
                 return stat;
-        if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
-                BUG();
+        if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) {
+                WARN_ON_ONCE(1);
+                return stat;
+        }
         /* copy out mic... */
         if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
-                BUG();
+                return stat;
         if (mic.len > RPC_MAX_AUTH_SIZE)
                 return stat;
         mic.data = kmalloc(mic.len, GFP_KERNEL);
@@ -1611,8 +1613,10 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
         BUG_ON(integ_len % 4);
         *p++ = htonl(integ_len);
         *p++ = htonl(gc->gc_seq);
-        if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len))
-                BUG();
+        if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) {
+                WARN_ON_ONCE(1);
+                goto out_err;
+        }
         if (resbuf->tail[0].iov_base == NULL) {
                 if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
                         goto out_err;
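The two svcauth_gss.c hunks above come from "rpc: remove some BUG()s": an unexpected XDR buffer layout now fails the individual request instead of panicking the host. A minimal sketch of the pattern, with a placeholder validity check standing in for the xdr_buf_subsegment() calls above (names are illustrative, not the real svcauth_gss code):

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/sunrpc/xdr.h>

/* Placeholder check standing in for xdr_buf_subsegment() above. */
static bool example_slice_ok(const struct xdr_buf *buf, unsigned int len)
{
        return len <= buf->len;
}

static int example_unwrap(struct xdr_buf *buf, unsigned int integ_len)
{
        int stat = -EINVAL;             /* caller drops the request on non-zero */

        if (!example_slice_ok(buf, integ_len)) {
                /* old style: BUG(); -- crashes the whole machine */
                WARN_ON_ONCE(1);        /* log a stack trace once, keep serving */
                return stat;
        }
        return 0;
}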
@@ -250,9 +250,9 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
         svc_xprt_received(new);
 }
 
-int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
-                 struct net *net, const int family,
-                 const unsigned short port, int flags)
+static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+                            struct net *net, const int family,
+                            const unsigned short port, int flags)
 {
         struct svc_xprt_class *xcl;
 
@@ -380,7 +380,6 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
         struct svc_pool *pool;
         struct svc_rqst *rqstp = NULL;
         int cpu;
-        bool queued = false;
 
         if (!svc_xprt_has_something_to_do(xprt))
                 goto out;
@@ -401,58 +400,25 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 
         atomic_long_inc(&pool->sp_stats.packets);
 
-redo_search:
+        dprintk("svc: transport %p put into queue\n", xprt);
+        spin_lock_bh(&pool->sp_lock);
+        list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+        pool->sp_stats.sockets_queued++;
+        spin_unlock_bh(&pool->sp_lock);
+
         /* find a thread for this xprt */
         rcu_read_lock();
         list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
-                /* Do a lockless check first */
-                if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+                if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
                         continue;
-
-                /*
-                 * Once the xprt has been queued, it can only be dequeued by
-                 * the task that intends to service it. All we can do at that
-                 * point is to try to wake this thread back up so that it can
-                 * do so.
-                 */
-                if (!queued) {
-                        spin_lock_bh(&rqstp->rq_lock);
-                        if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
-                                /* already busy, move on... */
-                                spin_unlock_bh(&rqstp->rq_lock);
-                                continue;
-                        }
-
-                        /* this one will do */
-                        rqstp->rq_xprt = xprt;
-                        svc_xprt_get(xprt);
-                        spin_unlock_bh(&rqstp->rq_lock);
-                }
-                rcu_read_unlock();
-
                 atomic_long_inc(&pool->sp_stats.threads_woken);
                 wake_up_process(rqstp->rq_task);
-                put_cpu();
-                goto out;
-        }
-        rcu_read_unlock();
-
-        /*
-         * We didn't find an idle thread to use, so we need to queue the xprt.
-         * Do so and then search again. If we find one, we can't hook this one
-         * up to it directly but we can wake the thread up in the hopes that it
-         * will pick it up once it searches for a xprt to service.
-         */
-        if (!queued) {
-                queued = true;
-                dprintk("svc: transport %p put into queue\n", xprt);
-                spin_lock_bh(&pool->sp_lock);
-                list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
-                pool->sp_stats.sockets_queued++;
-                spin_unlock_bh(&pool->sp_lock);
-                goto redo_search;
+                goto out_unlock;
         }
+        set_bit(SP_CONGESTED, &pool->sp_flags);
         rqstp = NULL;
+out_unlock:
+        rcu_read_unlock();
         put_cpu();
 out:
         trace_svc_xprt_do_enqueue(xprt, rqstp);
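Condensing the new svc_xprt_do_enqueue() flow above (a restatement of the added lines, with the stats counters trimmed, not separate code): the transport is now always put on the pool's sp_sockets list before any thread is woken, so a woken thread can always find it via svc_xprt_dequeue(); if no idle thread is found, the pool is simply marked SP_CONGESTED instead of re-running the search.

        /* 1. Publish the transport on the pool queue first. */
        spin_lock_bh(&pool->sp_lock);
        list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
        spin_unlock_bh(&pool->sp_lock);

        /* 2. Then try to claim exactly one idle thread to service it. */
        rcu_read_lock();
        list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
                if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
                        continue;               /* that one is busy, try the next */
                wake_up_process(rqstp->rq_task);
                goto out_unlock;
        }
        /* 3. Everyone is busy: note the congestion and let the next thread
         *    that calls svc_xprt_dequeue() pick the transport up.
         */
        set_bit(SP_CONGESTED, &pool->sp_flags);
out_unlock:
        rcu_read_unlock();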
@@ -721,38 +687,25 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 
 static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 {
-        struct svc_xprt *xprt;
         struct svc_pool *pool = rqstp->rq_pool;
         long time_left = 0;
 
         /* rq_xprt should be clear on entry */
         WARN_ON_ONCE(rqstp->rq_xprt);
 
-        /* Normally we will wait up to 5 seconds for any required
-         * cache information to be provided.
-         */
-        rqstp->rq_chandle.thread_wait = 5*HZ;
-
-        xprt = svc_xprt_dequeue(pool);
-        if (xprt) {
-                rqstp->rq_xprt = xprt;
-
-                /* As there is a shortage of threads and this request
-                 * had to be queued, don't allow the thread to wait so
-                 * long for cache updates.
-                 */
-                rqstp->rq_chandle.thread_wait = 1*HZ;
-                clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-                return xprt;
-        }
+        rqstp->rq_xprt = svc_xprt_dequeue(pool);
+        if (rqstp->rq_xprt)
+                goto out_found;
 
         /*
          * We have to be able to interrupt this wait
          * to bring down the daemons ...
          */
         set_current_state(TASK_INTERRUPTIBLE);
+        smp_mb__before_atomic();
+        clear_bit(SP_CONGESTED, &pool->sp_flags);
         clear_bit(RQ_BUSY, &rqstp->rq_flags);
-        smp_mb();
+        smp_mb__after_atomic();
 
         if (likely(rqst_should_sleep(rqstp)))
                 time_left = schedule_timeout(timeout);
@@ -761,13 +714,11 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 
         try_to_freeze();
 
-        spin_lock_bh(&rqstp->rq_lock);
         set_bit(RQ_BUSY, &rqstp->rq_flags);
-        spin_unlock_bh(&rqstp->rq_lock);
-
-        xprt = rqstp->rq_xprt;
-        if (xprt != NULL)
-                return xprt;
+        smp_mb__after_atomic();
+        rqstp->rq_xprt = svc_xprt_dequeue(pool);
+        if (rqstp->rq_xprt)
+                goto out_found;
 
         if (!time_left)
                 atomic_long_inc(&pool->sp_stats.threads_timedout);
@@ -775,6 +726,15 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
         if (signalled() || kthread_should_stop())
                 return ERR_PTR(-EINTR);
         return ERR_PTR(-EAGAIN);
+out_found:
+        /* Normally we will wait up to 5 seconds for any required
+         * cache information to be provided.
+         */
+        if (!test_bit(SP_CONGESTED, &pool->sp_flags))
+                rqstp->rq_chandle.thread_wait = 5*HZ;
+        else
+                rqstp->rq_chandle.thread_wait = 1*HZ;
+        return rqstp->rq_xprt;
 }
 
 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
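The svc_get_next_xprt() hunks above drop the rqstp->rq_lock critical section around the RQ_BUSY handoff. My reading of the change (not text from the commit): the waker claims a thread with test_and_set_bit(), which is atomic, so two wakers cannot pick the same idle thread, and the thread always re-runs svc_xprt_dequeue() after re-setting RQ_BUSY, so a transport queued while it was going to sleep is not lost. The two sides, condensed from the lines above:

        /* Enqueue side: claim an idle thread without taking rq_lock. */
        if (!test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
                wake_up_process(rqstp->rq_task);

        /* Thread side: advertise idleness, sleep, then re-check the queue. */
        set_current_state(TASK_INTERRUPTIBLE);
        smp_mb__before_atomic();
        clear_bit(SP_CONGESTED, &pool->sp_flags);
        clear_bit(RQ_BUSY, &rqstp->rq_flags);
        smp_mb__after_atomic();
        if (rqst_should_sleep(rqstp))
                schedule_timeout(timeout);

        set_bit(RQ_BUSY, &rqstp->rq_flags);
        smp_mb__after_atomic();
        rqstp->rq_xprt = svc_xprt_dequeue(pool);  /* never miss a queued xprt */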
@@ -133,6 +133,10 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
         if (ret)
                 goto out_err;
 
+        /* Bump page refcnt so Send completion doesn't release
+         * the rq_buffer before all retransmits are complete.
+         */
+        get_page(virt_to_page(rqst->rq_buffer));
         ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
         if (ret)
                 goto out_unmap;
@@ -165,7 +169,6 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
                 return -EINVAL;
         }
 
-        /* svc_rdma_sendto releases this page */
         page = alloc_page(RPCRDMA_DEF_GFP);
         if (!page)
                 return -ENOMEM;
@@ -184,6 +187,7 @@ xprt_rdma_bc_free(struct rpc_task *task)
 {
         struct rpc_rqst *rqst = task->tk_rqstp;
 
+        put_page(virt_to_page(rqst->rq_buffer));
         kfree(rqst->rq_rbuffer);
 }
 
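The backchannel hunks above take an extra reference on the send buffer's page so that the Send completion (which normally releases it) cannot free the rq_buffer while the RPC layer may still retransmit from it; the new put_page() in xprt_rdma_bc_free() drops that reference when the request is finally retired. A generic sketch of the pin/unpin pairing (illustrative helper names, not svcrdma code):

#include <linux/mm.h>

/* Pin a page-backed kernel buffer across an asynchronous send. */
static void example_pin_buffer(void *buffer)
{
        get_page(virt_to_page(buffer)); /* +1 ref: completion can't free it early */
}

/* Drop the extra reference once the buffer can really go away. */
static void example_unpin_buffer(void *buffer)
{
        put_page(virt_to_page(buffer));
}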
@@ -290,6 +290,7 @@ static void qp_event_handler(struct ib_event *event, void *context)
                         ib_event_msg(event->event), event->event,
                         event->element.qp);
                 set_bit(XPT_CLOSE, &xprt->xpt_flags);
+                svc_xprt_enqueue(xprt);
                 break;
         }
 }
@@ -322,8 +323,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
         set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
         if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                 goto out;
-        svc_xprt_enqueue(&xprt->sc_xprt);
-        goto out;
+        goto out_enqueue;
 
 flushed:
         if (wc->status != IB_WC_WR_FLUSH_ERR)
@@ -333,6 +333,8 @@ flushed:
         set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
         svc_rdma_put_context(ctxt, 1);
 
+out_enqueue:
+        svc_xprt_enqueue(&xprt->sc_xprt);
 out:
         svc_xprt_put(&xprt->sc_xprt);
 }
@@ -358,6 +360,7 @@ void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 
         if (unlikely(wc->status != IB_WC_SUCCESS)) {
                 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+                svc_xprt_enqueue(&xprt->sc_xprt);
                 if (wc->status != IB_WC_WR_FLUSH_ERR)
                         pr_err("svcrdma: Send: %s (%u/0x%x)\n",
                                ib_wc_status_msg(wc->status),
@@ -569,8 +572,10 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
         case RDMA_CM_EVENT_DEVICE_REMOVAL:
                 dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                         xprt, cma_id);
-                if (xprt)
+                if (xprt) {
                         set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+                        svc_xprt_enqueue(&xprt->sc_xprt);
+                }
                 break;
 
         default:
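All of the svc_rdma_transport.c hunks above apply the same rule from "svcrdma: Enqueue after setting XPT_CLOSE in completion handlers": XPT_CLOSE only marks the transport for teardown, so the handler must also call svc_xprt_enqueue() to make sure a server thread actually processes the close. A minimal sketch of the pattern (generic handler, illustrative name):

#include <linux/sunrpc/svc_xprt.h>

/* On a fatal completion or device event: mark dead, then kick a thread. */
static void example_fatal_event(struct svc_xprt *xprt)
{
        set_bit(XPT_CLOSE, &xprt->xpt_flags);   /* request teardown */
        svc_xprt_enqueue(xprt);                 /* wake a thread to perform it */
}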