Merge branch 'for-3.8' of git://linux-nfs.org/~bfields/linux
Pull nfsd update from Bruce Fields:
 "Included this time:

   - more nfsd containerization work from Stanislav Kinsbursky: we're
     not quite there yet, but should be by 3.9.

   - NFSv4.1 progress: implementation of basic backchannel security
     negotiation and the mandatory BACKCHANNEL_CTL operation.  See
     http://wiki.linux-nfs.org/wiki/index.php/Server_4.0_and_4.1_issues
     for remaining TODOs.

   - Fixes for some bugs that could be triggered by unusual compounds.
     Our xdr code wasn't designed with v4 compounds in mind, and it
     shows.  A more thorough rewrite is still a TODO.

   - If you've ever seen "RPC: multiple fragments per record not
     supported" logged while using some sort of odd userland NFS
     client, that should now be fixed (see the record-marking sketch
     after the shortlog below).

   - Further work from Jeff Layton on our mechanism for storing
     information about NFSv4 clients across reboots.

   - Further work from Bryan Schumaker on his fault-injection
     mechanism, which allows us to discard selective NFSv4 state to
     exercise rarely-taken recovery code paths in the client.

   - The usual mix of miscellaneous bug fixes and cleanup.

  Thanks to everyone who tested or contributed this cycle."

* 'for-3.8' of git://linux-nfs.org/~bfields/linux: (111 commits)
  nfsd4: don't leave freed stateid hashed
  nfsd4: free_stateid can use the current stateid
  nfsd4: cleanup: replace rq_resused count by rq_next_page pointer
  nfsd: warn on odd reply state in nfsd_vfs_read
  nfsd4: fix oops on unusual readlike compound
  nfsd4: disable zero-copy on non-final read ops
  svcrpc: fix some printks
  NFSD: Correct the size calculation in fault_inject_write
  NFSD: Pass correct buffer size to rpc_ntop
  nfsd: pass proper net to nfsd_destroy() from NFSd kthreads
  nfsd: simplify service shutdown
  nfsd: replace boolean nfsd_up flag by users counter
  nfsd: simplify NFSv4 state init and shutdown
  nfsd: introduce helpers for generic resources init and shutdown
  nfsd: make NFSd service structure allocated per net
  nfsd: make NFSd service boot time per-net
  nfsd: per-net NFSd up flag introduced
  nfsd: move per-net startup code to separated function
  nfsd: pass net to __write_ports() and down
  nfsd: pass net to nfsd_set_nrthreads()
  ...
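For background on the "multiple fragments per record" item above: RPC over TCP uses record marking (RFC 5531). A record is sent as one or more fragments, and every fragment is preceded by a four-byte big-endian marker whose top bit flags the final fragment and whose low 31 bits give the fragment length. The standalone sketch below illustrates that framing and the kind of reassembly the new svcsock code performs; it is an illustration only, not the kernel's implementation, and the constant names merely mirror RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK seen in the diff.

/* frag_reassembly.c -- standalone sketch of RPC record marking (RFC 5531):
 * a record may arrive as several fragments, each preceded by a 4-byte
 * marker (top bit = final fragment, low 31 bits = fragment length).
 * Illustration only; not the kernel's code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define LAST_FRAGMENT	(1U << 31)
#define SIZE_MASK	(~LAST_FRAGMENT)

/* Reassemble one record from a byte stream; returns the record length. */
static size_t reassemble_record(const unsigned char *stream, size_t stream_len,
				unsigned char *record, size_t record_max)
{
	size_t off = 0, datalen = 0;	/* datalen plays the role of sk_datalen */

	while (off + 4 <= stream_len) {
		uint32_t marker;

		memcpy(&marker, stream + off, 4);
		marker = ntohl(marker);
		off += 4;

		uint32_t fraglen = marker & SIZE_MASK;
		if (off + fraglen > stream_len || datalen + fraglen > record_max)
			return 0;	/* malformed or too large */

		memcpy(record + datalen, stream + off, fraglen);
		datalen += fraglen;
		off += fraglen;

		if (marker & LAST_FRAGMENT)
			return datalen;	/* complete record */
	}
	return 0;	/* ran out of data before the final fragment */
}

int main(void)
{
	/* "hello " in a non-final fragment, "world" in a final one. */
	unsigned char stream[64], record[64];
	uint32_t m1 = htonl(6), m2 = htonl(5 | LAST_FRAGMENT);

	memcpy(stream, &m1, 4);
	memcpy(stream + 4, "hello ", 6);
	memcpy(stream + 10, &m2, 4);
	memcpy(stream + 14, "world", 5);

	size_t n = reassemble_record(stream, 19, record, sizeof(record));
	printf("record of %zu bytes: %.*s\n", n, (int)n, record);
	return 0;
}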
@@ -23,7 +23,6 @@
 #include <linux/errno.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/nsproxy.h>
 #include <net/ipv6.h>
 
 #include <linux/sunrpc/clnt.h>
@@ -20,7 +20,6 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
-#include <linux/nsproxy.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/xdr.h>
@@ -1041,7 +1040,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
 }
 
 /*
- * Printk the given error with the address of the client that caused it.
+ * dprintk the given error with the address of the client that caused it.
  */
 static __printf(2, 3)
 void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
@@ -1055,8 +1054,7 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	net_warn_ratelimited("svc: %s: %pV",
-			     svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
+	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
 
 	va_end(args);
 }
@@ -1305,7 +1303,7 @@ svc_process(struct svc_rqst *rqstp)
 	 * Setup response xdr_buf.
 	 * Initially it has just one page
 	 */
-	rqstp->rq_resused = 1;
+	rqstp->rq_next_page = &rqstp->rq_respages[1];
 	resv->iov_base = page_address(rqstp->rq_respages[0]);
 	resv->iov_len = 0;
 	rqstp->rq_res.pages = rqstp->rq_respages + 1;
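This hunk is part of the "replace rq_resused count by rq_next_page pointer" cleanup from the shortlog: instead of counting how many rq_respages slots are in use, the request now carries a pointer to the first unused slot. The toy program below contrasts the two bookkeeping schemes; toy_rqst is a stand-in for illustration, not the kernel's struct svc_rqst.

/* next_page_demo.c -- toy illustration of replacing the rq_resused counter
 * with the rq_next_page pointer.  toy_rqst is a stand-in, not svc_rqst.
 */
#include <stdio.h>

#define MAX_PAGES 4

struct toy_rqst {
	void *respages[MAX_PAGES];	/* reply pages owned by the request */
	int resused;			/* old scheme: number of slots in use */
	void **next_page;		/* new scheme: first unused slot */
};

int main(void)
{
	static char a, b;
	struct toy_rqst old_rq = { .resused = 0 };
	struct toy_rqst new_rq = { 0 };

	new_rq.next_page = &new_rq.respages[0];

	/* Old scheme: claim a reply page by bumping the counter. */
	old_rq.respages[old_rq.resused++] = &a;
	old_rq.respages[old_rq.resused++] = &b;

	/* New scheme: claim a reply page by advancing the pointer; the count
	 * is recovered as (next_page - respages) where needed, as in the
	 * svc_rdma send_reply() hunk near the end of this diff. */
	*(new_rq.next_page++) = &a;
	*(new_rq.next_page++) = &b;

	printf("in use: old=%d new=%td\n",
	       old_rq.resused, new_rq.next_page - new_rq.respages);

	/* Detaching the pages again, old idiom vs new idiom (compare the
	 * svc_rdma recvfrom hunk below). */
	while (old_rq.resused)
		old_rq.respages[--old_rq.resused] = NULL;
	while (new_rq.next_page != new_rq.respages)
		*(--new_rq.next_page) = NULL;

	printf("after detach: old=%d new=%td\n",
	       old_rq.resused, new_rq.next_page - new_rq.respages);
	return 0;
}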
@@ -605,6 +605,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
 		rqstp->rq_respages = rqstp->rq_pages + 1 +
 			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
 	}
+	rqstp->rq_next_page = rqstp->rq_respages+1;
 
 	if (serv->sv_stats)
 		serv->sv_stats->netudpcnt++;
@@ -878,9 +879,9 @@ static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst
 {
 	unsigned int i, len, npages;
 
-	if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+	if (svsk->sk_datalen == 0)
 		return 0;
-	len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+	len = svsk->sk_datalen;
 	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		if (rqstp->rq_pages[i] != NULL)
@@ -897,9 +898,9 @@ static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
 {
 	unsigned int i, len, npages;
 
-	if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+	if (svsk->sk_datalen == 0)
 		return;
-	len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+	len = svsk->sk_datalen;
 	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		svsk->sk_pages[i] = rqstp->rq_pages[i];
@@ -911,9 +912,9 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
 {
 	unsigned int i, len, npages;
 
-	if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+	if (svsk->sk_datalen == 0)
 		goto out;
-	len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+	len = svsk->sk_datalen;
 	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		BUG_ON(svsk->sk_pages[i] == NULL);
@@ -922,13 +923,12 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk)
 	}
 out:
 	svsk->sk_tcplen = 0;
+	svsk->sk_datalen = 0;
 }
 
 /*
- * Receive data.
+ * Receive fragment record header.
  * If we haven't gotten the record length yet, get the next four bytes.
- * Otherwise try to gobble up as much as possible up to the complete
- * record length.
  */
 static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
 {
@@ -954,32 +954,16 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
 		return -EAGAIN;
 	}
 
-	svsk->sk_reclen = ntohl(svsk->sk_reclen);
-	if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) {
-		/* FIXME: technically, a record can be fragmented,
-		 *  and non-terminal fragments will not have the top
-		 *  bit set in the fragment length header.
-		 *  But apparently no known nfs clients send fragmented
-		 *  records. */
-		net_notice_ratelimited("RPC: multiple fragments per record not supported\n");
-		goto err_delete;
-	}
-
-	svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
-	dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
-	if (svsk->sk_reclen > serv->sv_max_mesg) {
-		net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n",
-				       (unsigned long)svsk->sk_reclen);
+	dprintk("svc: TCP record, %d bytes\n", svc_sock_reclen(svsk));
+	if (svc_sock_reclen(svsk) + svsk->sk_datalen >
+					serv->sv_max_mesg) {
+		net_notice_ratelimited("RPC: fragment too large: %d\n",
+				       svc_sock_reclen(svsk));
 		goto err_delete;
 	}
-	}
 
-	if (svsk->sk_reclen < 8)
-		goto err_delete; /* client is nuts. */
-
-	len = svsk->sk_reclen;
-
-	return len;
+	return svc_sock_reclen(svsk);
 error:
 	dprintk("RPC: TCP recv_record got %d\n", len);
 	return len;
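The rewritten svc_tcp_recv_record() above relies on svc_sock_reclen() and svc_sock_final_rec(), whose definitions are not part of the hunks shown. Since the in-place ntohl() conversion of sk_reclen was removed, sk_reclen presumably now holds the raw record marker and the helpers decode it on demand, roughly as in this sketch (an assumption based on the diff, not verbatim kernel source):

/* reclen_helpers.c -- sketch of what svc_sock_reclen() and
 * svc_sock_final_rec() presumably compute.  toy_svc_sock stands in for the
 * real struct svc_sock.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define RPC_LAST_STREAM_FRAGMENT	(1U << 31)
#define RPC_FRAGMENT_SIZE_MASK		(~RPC_LAST_STREAM_FRAGMENT)

struct toy_svc_sock {
	uint32_t sk_reclen;	/* record marker, kept in network byte order */
};

/* Length of the current fragment, without the final-fragment bit. */
static uint32_t svc_sock_reclen(struct toy_svc_sock *svsk)
{
	return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
}

/* Non-zero if the current fragment is the last one of its record. */
static uint32_t svc_sock_final_rec(struct toy_svc_sock *svsk)
{
	return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
}

int main(void)
{
	struct toy_svc_sock svsk = {
		.sk_reclen = htonl(132 | RPC_LAST_STREAM_FRAGMENT),
	};

	printf("reclen=%u final=%s\n", svc_sock_reclen(&svsk),
	       svc_sock_final_rec(&svsk) ? "yes" : "no");
	return 0;
}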
@@ -1023,7 +1007,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
 	if (dst->iov_len < src->iov_len)
 		return -EAGAIN; /* whatever; just giving up. */
 	memcpy(dst->iov_base, src->iov_base, src->iov_len);
-	xprt_complete_rqst(req->rq_task, svsk->sk_reclen);
+	xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
 	rqstp->rq_arg.len = 0;
 	return 0;
 }
@@ -1042,6 +1026,17 @@ static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
 	return i;
 }
 
+static void svc_tcp_fragment_received(struct svc_sock *svsk)
+{
+	/* If we have more data, signal svc_xprt_enqueue() to try again */
+	if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+	dprintk("svc: TCP %s record (%d bytes)\n",
+			svc_sock_final_rec(svsk) ? "final" : "nonfinal",
+			svc_sock_reclen(svsk));
+	svsk->sk_tcplen = 0;
+	svsk->sk_reclen = 0;
+}
 
 /*
  * Receive data from a TCP socket.
@@ -1068,29 +1063,39 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		goto error;
 
 	base = svc_tcp_restore_pages(svsk, rqstp);
-	want = svsk->sk_reclen - base;
+	want = svc_sock_reclen(svsk) - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
 
 	vec = rqstp->rq_vec;
 
 	pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
-						svsk->sk_reclen);
+						svsk->sk_datalen + want);
 
 	rqstp->rq_respages = &rqstp->rq_pages[pnum];
+	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
 	/* Now receive data */
 	len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
-	if (len >= 0)
+	if (len >= 0) {
 		svsk->sk_tcplen += len;
-	if (len != want) {
+		svsk->sk_datalen += len;
+	}
+	if (len != want || !svc_sock_final_rec(svsk)) {
 		svc_tcp_save_pages(svsk, rqstp);
 		if (len < 0 && len != -EAGAIN)
-			goto err_other;
-		dprintk("svc: incomplete TCP record (%d of %d)\n",
-			svsk->sk_tcplen, svsk->sk_reclen);
+			goto err_delete;
+		if (len == want)
+			svc_tcp_fragment_received(svsk);
+		else
+			dprintk("svc: incomplete TCP record (%d of %d)\n",
+				(int)(svsk->sk_tcplen - sizeof(rpc_fraghdr)),
+				svc_sock_reclen(svsk));
 		goto err_noclose;
 	}
 
-	rqstp->rq_arg.len = svsk->sk_reclen;
+	if (svc_sock_reclen(svsk) < 8)
+		goto err_delete; /* client is nuts. */
+
+	rqstp->rq_arg.len = svsk->sk_datalen;
 	rqstp->rq_arg.page_base = 0;
 	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
 		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
@@ -1107,11 +1112,8 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		len = receive_cb_reply(svsk, rqstp);
 
 	/* Reset TCP read info */
-	svsk->sk_reclen = 0;
-	svsk->sk_tcplen = 0;
-	/* If we have more data, signal svc_xprt_enqueue() to try again */
-	if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
-		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+	svsk->sk_datalen = 0;
+	svc_tcp_fragment_received(svsk);
 
 	if (len < 0)
 		goto error;
@@ -1120,15 +1122,14 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	if (serv->sv_stats)
 		serv->sv_stats->nettcpcnt++;
 
-	dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len);
 	return rqstp->rq_arg.len;
 
 error:
 	if (len != -EAGAIN)
-		goto err_other;
+		goto err_delete;
 	dprintk("RPC: TCP recvfrom got EAGAIN\n");
 	return 0;
-err_other:
+err_delete:
 	printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
 	       svsk->sk_xprt.xpt_server->sv_name, -len);
 	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1305,6 +1306,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 
 		svsk->sk_reclen = 0;
 		svsk->sk_tcplen = 0;
+		svsk->sk_datalen = 0;
 		memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
 
 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
@@ -521,11 +521,11 @@ next_sge:
 		rqstp->rq_pages[ch_no] = NULL;
 
 	/*
-	 * Detach res pages. svc_release must see a resused count of
-	 * zero or it will attempt to put them.
+	 * Detach res pages. If svc_release sees any it will attempt to
+	 * put them.
 	 */
-	while (rqstp->rq_resused)
-		rqstp->rq_respages[--rqstp->rq_resused] = NULL;
+	while (rqstp->rq_next_page != rqstp->rq_respages)
+		*(--rqstp->rq_next_page) = NULL;
 
 	return err;
 }
@@ -550,7 +550,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 
 	/* rq_respages starts after the last arg page */
 	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
-	rqstp->rq_resused = 0;
+	rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no];
 
 	/* Rebuild rq_arg head and tail. */
 	rqstp->rq_arg.head[0] = head->arg.head[0];
@@ -548,6 +548,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	int sge_no;
 	int sge_bytes;
 	int page_no;
+	int pages;
 	int ret;
 
 	/* Post a recv buffer to handle another request. */
@@ -611,7 +612,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	 * respages array. They are our pages until the I/O
 	 * completes.
 	 */
-	for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
+	pages = rqstp->rq_next_page - rqstp->rq_respages;
+	for (page_no = 0; page_no < pages; page_no++) {
 		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
 		ctxt->count++;
 		rqstp->rq_respages[page_no] = NULL;