Merge branch 'core/urgent' into x86/urgent, to pick up objtool fix
Signed-off-by: Ingo Molnar <mingo@kernel.org>
net/9p/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
 	mod.o \
 	client.o \
 	error.o \
-	util.o \
 	protocol.o \
 	trans_fd.o \
 	trans_common.o \
net/9p/client.c | 561 lines changed (file diff suppressed because it is too large)
net/9p/mod.c
@@ -171,11 +171,17 @@ void v9fs_put_trans(struct p9_trans_module *m)
  */
 static int __init init_p9(void)
 {
+	int ret;
+
+	ret = p9_client_init();
+	if (ret)
+		return ret;
+
 	p9_error_init();
 	pr_info("Installing 9P2000 support\n");
 	p9_trans_fd_init();
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -188,6 +194,7 @@ static void __exit exit_p9(void)
 	pr_info("Unloading 9P2000 support\n");
 
 	p9_trans_fd_exit();
+	p9_client_exit();
 }
 
 module_init(init_p9)
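A side note on the mod.c hunk above (not part of the commit): init_p9() now runs p9_client_init() first, which sets up the request cache, and propagates its failure instead of returning a hard-coded 0; exit_p9() tears it down after the transport. A minimal userspace sketch of that init/teardown pairing, with hypothetical stand-in names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for p9_client_init()/p9_client_exit(). */
static void *req_cache;

static int client_init(void)
{
	req_cache = malloc(4096);	/* the kernel would use kmem_cache_create() */
	return req_cache ? 0 : -1;
}

static void client_exit(void)
{
	free(req_cache);		/* the kernel would use kmem_cache_destroy() */
}

int main(void)
{
	if (client_init()) {		/* propagate the failure, don't ignore it */
		fprintf(stderr, "init failed\n");
		return 1;
	}
	puts("Installing 9P2000 support");
	client_exit();			/* teardown mirrors init order */
	return 0;
}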
net/9p/protocol.c
@@ -46,10 +46,15 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
 void p9stat_free(struct p9_wstat *stbuf)
 {
 	kfree(stbuf->name);
+	stbuf->name = NULL;
 	kfree(stbuf->uid);
+	stbuf->uid = NULL;
 	kfree(stbuf->gid);
+	stbuf->gid = NULL;
 	kfree(stbuf->muid);
+	stbuf->muid = NULL;
 	kfree(stbuf->extension);
+	stbuf->extension = NULL;
 }
 EXPORT_SYMBOL(p9stat_free);
 
@@ -566,9 +571,10 @@ int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
 	if (ret) {
 		p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
 		trace_9p_protocol_dump(clnt, &fake_pdu);
+		return ret;
 	}
 
-	return ret;
+	return fake_pdu.offset;
 }
 EXPORT_SYMBOL(p9stat_read);
 
@@ -617,13 +623,19 @@ int p9dirent_read(struct p9_client *clnt, char *buf, int len,
 	if (ret) {
 		p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
 		trace_9p_protocol_dump(clnt, &fake_pdu);
-		goto out;
+		return ret;
 	}
 
-	strcpy(dirent->d_name, nameptr);
+	ret = strscpy(dirent->d_name, nameptr, sizeof(dirent->d_name));
+	if (ret < 0) {
+		p9_debug(P9_DEBUG_ERROR,
+			 "On the wire dirent name too long: %s\n",
+			 nameptr);
+		kfree(nameptr);
+		return ret;
+	}
 	kfree(nameptr);
 
-out:
 	return fake_pdu.offset;
 }
 EXPORT_SYMBOL(p9dirent_read);
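The strcpy()-to-strscpy() switch above bounds the copy to the destination and reports truncation as an error, instead of letting an on-the-wire name overflow d_name. strscpy() is a kernel helper; a rough userspace approximation (hypothetical bounded_copy(), a sketch only) looks like:

#include <stdio.h>
#include <string.h>

/* Userspace analog of the kernel's strscpy(): copies at most
 * size - 1 bytes, always NUL-terminates, returns -1 on truncation. */
static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (len >= size) {
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';
		}
		return -1;		/* kernel strscpy() returns -E2BIG */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char d_name[8];

	if (bounded_copy(d_name, "much-too-long-name", sizeof(d_name)) < 0)
		fprintf(stderr, "on the wire dirent name too long\n");
	return 0;
}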
net/9p/trans_fd.c
@@ -131,7 +131,8 @@ struct p9_conn {
 	int err;
 	struct list_head req_list;
 	struct list_head unsent_req_list;
-	struct p9_req_t *req;
+	struct p9_req_t *rreq;
+	struct p9_req_t *wreq;
 	char tmp_buf[7];
 	struct p9_fcall rc;
 	int wpos;
@@ -291,7 +292,6 @@ static void p9_read_work(struct work_struct *work)
 	__poll_t n;
 	int err;
 	struct p9_conn *m;
-	int status = REQ_STATUS_ERROR;
 
 	m = container_of(work, struct p9_conn, rq);
 
@@ -322,7 +322,7 @@ static void p9_read_work(struct work_struct *work)
 	m->rc.offset += err;
 
 	/* header read in */
-	if ((!m->req) && (m->rc.offset == m->rc.capacity)) {
+	if ((!m->rreq) && (m->rc.offset == m->rc.capacity)) {
 		p9_debug(P9_DEBUG_TRANS, "got new header\n");
 
 		/* Header size */
@@ -346,23 +346,23 @@ static void p9_read_work(struct work_struct *work)
 			 "mux %p pkt: size: %d bytes tag: %d\n",
 			 m, m->rc.size, m->rc.tag);
 
-		m->req = p9_tag_lookup(m->client, m->rc.tag);
-		if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
+		m->rreq = p9_tag_lookup(m->client, m->rc.tag);
+		if (!m->rreq || (m->rreq->status != REQ_STATUS_SENT)) {
 			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
 				 m->rc.tag);
 			err = -EIO;
 			goto error;
 		}
 
-		if (m->req->rc == NULL) {
+		if (!m->rreq->rc.sdata) {
 			p9_debug(P9_DEBUG_ERROR,
 				 "No recv fcall for tag %d (req %p), disconnecting!\n",
-				 m->rc.tag, m->req);
-			m->req = NULL;
+				 m->rc.tag, m->rreq);
+			m->rreq = NULL;
 			err = -EIO;
 			goto error;
 		}
-		m->rc.sdata = (char *)m->req->rc + sizeof(struct p9_fcall);
+		m->rc.sdata = m->rreq->rc.sdata;
 		memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity);
 		m->rc.capacity = m->rc.size;
 	}
@@ -370,20 +370,27 @@ static void p9_read_work(struct work_struct *work)
 	/* packet is read in
 	 * not an else because some packets (like clunk) have no payload
 	 */
-	if ((m->req) && (m->rc.offset == m->rc.capacity)) {
+	if ((m->rreq) && (m->rc.offset == m->rc.capacity)) {
 		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
-		m->req->rc->size = m->rc.offset;
+		m->rreq->rc.size = m->rc.offset;
 		spin_lock(&m->client->lock);
-		if (m->req->status != REQ_STATUS_ERROR)
-			status = REQ_STATUS_RCVD;
-		list_del(&m->req->req_list);
-		/* update req->status while holding client->lock */
-		p9_client_cb(m->client, m->req, status);
+		if (m->rreq->status == REQ_STATUS_SENT) {
+			list_del(&m->rreq->req_list);
+			p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
+		} else {
+			spin_unlock(&m->client->lock);
+			p9_debug(P9_DEBUG_ERROR,
+				 "Request tag %d errored out while we were reading the reply\n",
+				 m->rc.tag);
+			err = -EIO;
+			goto error;
+		}
 		spin_unlock(&m->client->lock);
 		m->rc.sdata = NULL;
 		m->rc.offset = 0;
 		m->rc.capacity = 0;
-		m->req = NULL;
+		p9_req_put(m->rreq);
+		m->rreq = NULL;
 	}
 
 end_clear:
@@ -469,9 +476,11 @@ static void p9_write_work(struct work_struct *work)
 		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
 		list_move_tail(&req->req_list, &m->req_list);
 
-		m->wbuf = req->tc->sdata;
-		m->wsize = req->tc->size;
+		m->wbuf = req->tc.sdata;
+		m->wsize = req->tc.size;
 		m->wpos = 0;
+		p9_req_get(req);
+		m->wreq = req;
 		spin_unlock(&m->client->lock);
 	}
 
@@ -492,8 +501,11 @@ static void p9_write_work(struct work_struct *work)
 	}
 
 	m->wpos += err;
-	if (m->wpos == m->wsize)
+	if (m->wpos == m->wsize) {
 		m->wpos = m->wsize = 0;
+		p9_req_put(m->wreq);
+		m->wreq = NULL;
+	}
 
 end_clear:
 	clear_bit(Wworksched, &m->wsched);
@@ -663,7 +675,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 	struct p9_conn *m = &ts->conn;
 
 	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
-		 m, current, req->tc, req->tc->id);
+		 m, current, &req->tc, req->tc.id);
 	if (m->err < 0)
 		return m->err;
 
@@ -694,6 +706,7 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 	if (req->status == REQ_STATUS_UNSENT) {
 		list_del(&req->req_list);
 		req->status = REQ_STATUS_FLSHD;
+		p9_req_put(req);
 		ret = 0;
 	}
 	spin_unlock(&client->lock);
@@ -711,6 +724,7 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
 	spin_lock(&client->lock);
 	list_del(&req->req_list);
 	spin_unlock(&client->lock);
+	p9_req_put(req);
 
 	return 0;
 }
@@ -862,7 +876,15 @@ static void p9_conn_destroy(struct p9_conn *m)
 
 	p9_mux_poll_stop(m);
 	cancel_work_sync(&m->rq);
+	if (m->rreq) {
+		p9_req_put(m->rreq);
+		m->rreq = NULL;
+	}
 	cancel_work_sync(&m->wq);
+	if (m->wreq) {
+		p9_req_put(m->wreq);
+		m->wreq = NULL;
+	}
 
 	p9_conn_cancel(m, -ECONNRESET);
 
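The trans_fd changes above split the single in-flight pointer into rreq/wreq and pin the request with p9_req_get() for as long as the read or write side holds it, dropping the reference with p9_req_put() on completion, cancel, or connection teardown. A toy sketch of that get/put discipline (names hypothetical, single-threaded for brevity; the kernel uses kref/refcount_t):

#include <stdio.h>
#include <stdlib.h>

struct req {
	int refcount;
	int tag;
};

static struct req *req_get(struct req *r)
{
	r->refcount++;			/* kernel: refcount_inc() */
	return r;
}

static void req_put(struct req *r)
{
	if (--r->refcount == 0) {	/* kernel: refcount_dec_and_test() */
		printf("freeing req %d\n", r->tag);
		free(r);
	}
}

int main(void)
{
	struct req *r = malloc(sizeof(*r));
	struct req *wreq;

	r->refcount = 1;		/* creator's reference */
	r->tag = 42;

	wreq = req_get(r);		/* writer pins the request... */
	req_put(wreq);			/* ...and drops it once the bytes are out */
	req_put(r);			/* last put frees */
	return 0;
}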
net/9p/trans_rdma.c
@@ -122,7 +122,7 @@ struct p9_rdma_context {
 	dma_addr_t busa;
 	union {
 		struct p9_req_t *req;
-		struct p9_fcall *rc;
+		struct p9_fcall rc;
 	};
 };
 
@@ -274,8 +274,7 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 	case RDMA_CM_EVENT_DISCONNECTED:
 		if (rdma)
 			rdma->state = P9_RDMA_CLOSED;
-		if (c)
-			c->status = Disconnected;
+		c->status = Disconnected;
 		break;
 
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
@@ -320,8 +319,8 @@ recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	if (wc->status != IB_WC_SUCCESS)
 		goto err_out;
 
-	c->rc->size = wc->byte_len;
-	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
+	c->rc.size = wc->byte_len;
+	err = p9_parse_header(&c->rc, NULL, NULL, &tag, 1);
 	if (err)
 		goto err_out;
 
@@ -331,12 +330,13 @@ recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	/* Check that we have not yet received a reply for this request.
 	 */
-	if (unlikely(req->rc)) {
+	if (unlikely(req->rc.sdata)) {
 		pr_err("Duplicate reply for request %d", tag);
 		goto err_out;
 	}
 
-	req->rc = c->rc;
+	req->rc.size = c->rc.size;
+	req->rc.sdata = c->rc.sdata;
 	p9_client_cb(client, req, REQ_STATUS_RCVD);
 
 out:
@@ -361,9 +361,10 @@ send_done(struct ib_cq *cq, struct ib_wc *wc)
 		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
 
 	ib_dma_unmap_single(rdma->cm_id->device,
-			    c->busa, c->req->tc->size,
+			    c->busa, c->req->tc.size,
 			    DMA_TO_DEVICE);
 	up(&rdma->sq_sem);
+	p9_req_put(c->req);
 	kfree(c);
 }
 
@@ -401,7 +402,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
 	struct ib_sge sge;
 
 	c->busa = ib_dma_map_single(rdma->cm_id->device,
-				    c->rc->sdata, client->msize,
+				    c->rc.sdata, client->msize,
 				    DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
 		goto error;
@@ -443,9 +444,9 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 	 **/
 	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
 		if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
-			/* Got one ! */
-			kfree(req->rc);
-			req->rc = NULL;
+			/* Got one! */
+			p9_fcall_fini(&req->rc);
+			req->rc.sdata = NULL;
 			goto dont_need_post_recv;
 		} else {
 			/* We raced and lost. */
@@ -459,7 +460,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 		err = -ENOMEM;
 		goto recv_error;
 	}
-	rpl_context->rc = req->rc;
+	rpl_context->rc.sdata = req->rc.sdata;
 
 	/*
 	 * Post a receive buffer for this request. We need to ensure
@@ -475,11 +476,11 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 
 	err = post_recv(client, rpl_context);
 	if (err) {
-		p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
+		p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
 		goto recv_error;
 	}
 	/* remove posted receive buffer from request structure */
-	req->rc = NULL;
+	req->rc.sdata = NULL;
 
 dont_need_post_recv:
 	/* Post the request */
@@ -491,7 +492,7 @@ dont_need_post_recv:
 	c->req = req;
 
 	c->busa = ib_dma_map_single(rdma->cm_id->device,
-				    c->req->tc->sdata, c->req->tc->size,
+				    c->req->tc.sdata, c->req->tc.size,
 				    DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
 		err = -EIO;
@@ -501,7 +502,7 @@ dont_need_post_recv:
 	c->cqe.done = send_done;
 
 	sge.addr = c->busa;
-	sge.length = c->req->tc->size;
+	sge.length = c->req->tc.size;
 	sge.lkey = rdma->pd->local_dma_lkey;
 
 	wr.next = NULL;
@@ -544,7 +545,7 @@ dont_need_post_recv:
 recv_error:
 	kfree(rpl_context);
 	spin_lock_irqsave(&rdma->req_lock, flags);
-	if (rdma->state < P9_RDMA_CLOSING) {
+	if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
 		rdma->state = P9_RDMA_CLOSING;
 		spin_unlock_irqrestore(&rdma->req_lock, flags);
 		rdma_disconnect(rdma->cm_id);
net/9p/trans_virtio.c
@@ -155,7 +155,7 @@ static void req_done(struct virtqueue *vq)
 		}
 
 		if (len) {
-			req->rc->size = len;
+			req->rc.size = len;
 			p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
 		}
 	}
@@ -207,6 +207,13 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
 	return 1;
 }
 
+/* Reply won't come, so drop req ref */
+static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
+{
+	p9_req_put(req);
+	return 0;
+}
+
 /**
  * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
  * this takes a list of pages.
@@ -273,12 +280,12 @@ req_retry:
 	out_sgs = in_sgs = 0;
 	/* Handle out VirtIO ring buffers */
 	out = pack_sg_list(chan->sg, 0,
-			   VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
+			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
 	if (out)
 		sgs[out_sgs++] = chan->sg;
 
 	in = pack_sg_list(chan->sg, out,
-			  VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
+			  VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity);
 	if (in)
 		sgs[out_sgs + in_sgs++] = chan->sg + out;
 
@@ -404,6 +411,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 	struct scatterlist *sgs[4];
 	size_t offs;
 	int need_drop = 0;
+	int kicked = 0;
 
 	p9_debug(P9_DEBUG_TRANS, "virtio request\n");
 
@@ -411,29 +419,33 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 		__le32 sz;
 		int n = p9_get_mapped_pages(chan, &out_pages, uodata,
 					    outlen, &offs, &need_drop);
-		if (n < 0)
-			return n;
+		if (n < 0) {
+			err = n;
+			goto err_out;
+		}
 		out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
 		if (n != outlen) {
 			__le32 v = cpu_to_le32(n);
-			memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
 			outlen = n;
 		}
 		/* The size field of the message must include the length of the
 		 * header and the length of the data.  We didn't actually know
 		 * the length of the data until this point so add it in now.
 		 */
-		sz = cpu_to_le32(req->tc->size + outlen);
-		memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
+		sz = cpu_to_le32(req->tc.size + outlen);
+		memcpy(&req->tc.sdata[0], &sz, sizeof(sz));
 	} else if (uidata) {
 		int n = p9_get_mapped_pages(chan, &in_pages, uidata,
 					    inlen, &offs, &need_drop);
-		if (n < 0)
-			return n;
+		if (n < 0) {
+			err = n;
+			goto err_out;
+		}
 		in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
 		if (n != inlen) {
 			__le32 v = cpu_to_le32(n);
-			memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
+			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
 			inlen = n;
 		}
 	}
@@ -445,7 +457,7 @@ req_retry_pinned:
 
 	/* out data */
 	out = pack_sg_list(chan->sg, 0,
-			   VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
+			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
 
 	if (out)
 		sgs[out_sgs++] = chan->sg;
@@ -464,7 +476,7 @@ req_retry_pinned:
 	 * alloced memory and payload onto the user buffer.
 	 */
 	in = pack_sg_list(chan->sg, out,
-			  VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len);
+			  VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
 	if (in)
 		sgs[out_sgs + in_sgs++] = chan->sg + out;
 
@@ -498,6 +510,7 @@ req_retry_pinned:
 	}
 	virtqueue_kick(chan->vq);
 	spin_unlock_irqrestore(&chan->lock, flags);
+	kicked = 1;
 	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
 	err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
 	/*
@@ -518,6 +531,10 @@ err_out:
 	}
 	kvfree(in_pages);
 	kvfree(out_pages);
+	if (!kicked) {
+		/* reply won't come */
+		p9_req_put(req);
+	}
 	return err;
 }
 
@@ -750,6 +767,7 @@ static struct p9_trans_module p9_virtio_trans = {
 	.request = p9_virtio_request,
 	.zc_request = p9_virtio_zc_request,
 	.cancel = p9_virtio_cancel,
+	.cancelled = p9_virtio_cancelled,
 	/*
 	 * We leave one entry for input and one entry for response
 	 * headers. We also skip one more entry to accomodate, address
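Across all the transports, req->tc and req->rc change from `struct p9_fcall *` pointers to fcalls embedded in the request (`req->tc.size` instead of `req->tc->size`), so one allocation carries the request plus both headers and there is no NULL header pointer left to check — only the sdata buffer can be absent. The layout difference, in a stand-alone sketch with hypothetical types:

#include <stdio.h>
#include <stdlib.h>

struct fcall {
	unsigned int size;
	char *sdata;			/* payload buffer, may be NULL */
};

/* Before: header structs were separate allocations. */
struct req_old {
	struct fcall *tc, *rc;		/* two extra allocations, two NULL checks */
};

/* After: headers are embedded, only sdata is allocated separately. */
struct req_new {
	struct fcall tc, rc;		/* one allocation for req + headers */
};

int main(void)
{
	struct req_new *r = calloc(1, sizeof(*r));

	r->tc.size = 7;			/* was r->tc->size */
	printf("tc.size=%u rc present=%s\n", r->tc.size,
	       r->rc.sdata ? "yes" : "no");	/* was 'r->rc == NULL' */
	free(r);
	return 0;
}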
net/9p/trans_xen.c
@@ -141,7 +141,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
 	struct xen_9pfs_front_priv *priv = NULL;
 	RING_IDX cons, prod, masked_cons, masked_prod;
 	unsigned long flags;
-	u32 size = p9_req->tc->size;
+	u32 size = p9_req->tc.size;
 	struct xen_9pfs_dataring *ring;
 	int num;
 
@@ -154,7 +154,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
 	if (!priv || priv->client != client)
 		return -EINVAL;
 
-	num = p9_req->tc->tag % priv->num_rings;
+	num = p9_req->tc.tag % priv->num_rings;
 	ring = &priv->rings[num];
 
 again:
@@ -176,7 +176,7 @@ again:
 	masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
 	masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
 
-	xen_9pfs_write_packet(ring->data.out, p9_req->tc->sdata, size,
+	xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
 			      &masked_prod, masked_cons, XEN_9PFS_RING_SIZE);
 
 	p9_req->status = REQ_STATUS_SENT;
@@ -185,6 +185,7 @@ again:
 	ring->intf->out_prod = prod;
 	spin_unlock_irqrestore(&ring->lock, flags);
 	notify_remote_via_irq(ring->irq);
+	p9_req_put(p9_req);
 
 	return 0;
 }
@@ -229,12 +230,12 @@ static void p9_xen_response(struct work_struct *work)
 			continue;
 		}
 
-		memcpy(req->rc, &h, sizeof(h));
-		req->rc->offset = 0;
+		memcpy(&req->rc, &h, sizeof(h));
+		req->rc.offset = 0;
 
 		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
 		/* Then, read the whole packet (including the header) */
-		xen_9pfs_read_packet(req->rc->sdata, ring->data.in, h.size,
+		xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
 				     masked_prod, &masked_cons,
 				     XEN_9PFS_RING_SIZE);
 
@@ -391,8 +392,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
 	unsigned int max_rings, max_ring_order, len = 0;
 
 	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
-	if (!len)
-		return -EINVAL;
+	if (IS_ERR(versions))
+		return PTR_ERR(versions);
 	if (strcmp(versions, "1")) {
 		kfree(versions);
 		return -EINVAL;
net/9p/util.c | 140 lines removed (file deleted)
@@ -1,140 +0,0 @@
-/*
- *  net/9p/util.c
- *
- *  This file contains some helper functions
- *
- *  Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
- *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
- *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2
- *  as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to:
- *  Free Software Foundation
- *  51 Franklin Street, Fifth Floor
- *  Boston, MA  02111-1301  USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/parser.h>
-#include <linux/idr.h>
-#include <linux/slab.h>
-#include <net/9p/9p.h>
-
-/**
- * struct p9_idpool - per-connection accounting for tag idpool
- * @lock: protects the pool
- * @pool: idr to allocate tag id from
- *
- */
-
-struct p9_idpool {
-	spinlock_t lock;
-	struct idr pool;
-};
-
-/**
- * p9_idpool_create - create a new per-connection id pool
- *
- */
-
-struct p9_idpool *p9_idpool_create(void)
-{
-	struct p9_idpool *p;
-
-	p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL);
-	if (!p)
-		return ERR_PTR(-ENOMEM);
-
-	spin_lock_init(&p->lock);
-	idr_init(&p->pool);
-
-	return p;
-}
-EXPORT_SYMBOL(p9_idpool_create);
-
-/**
- * p9_idpool_destroy - create a new per-connection id pool
- * @p: idpool to destroy
- */
-
-void p9_idpool_destroy(struct p9_idpool *p)
-{
-	idr_destroy(&p->pool);
-	kfree(p);
-}
-EXPORT_SYMBOL(p9_idpool_destroy);
-
-/**
- * p9_idpool_get - allocate numeric id from pool
- * @p: pool to allocate from
- *
- * Bugs: This seems to be an awful generic function, should it be in idr.c with
- *            the lock included in struct idr?
- */
-
-int p9_idpool_get(struct p9_idpool *p)
-{
-	int i;
-	unsigned long flags;
-
-	idr_preload(GFP_NOFS);
-	spin_lock_irqsave(&p->lock, flags);
-
-	/* no need to store exactly p, we just need something non-null */
-	i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT);
-
-	spin_unlock_irqrestore(&p->lock, flags);
-	idr_preload_end();
-	if (i < 0)
-		return -1;
-
-	p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
-	return i;
-}
-EXPORT_SYMBOL(p9_idpool_get);
-
-/**
- * p9_idpool_put - release numeric id from pool
- * @id: numeric id which is being released
- * @p: pool to release id into
- *
- * Bugs: This seems to be an awful generic function, should it be in idr.c with
- *            the lock included in struct idr?
- */
-
-void p9_idpool_put(int id, struct p9_idpool *p)
-{
-	unsigned long flags;
-
-	p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p);
-
-	spin_lock_irqsave(&p->lock, flags);
-	idr_remove(&p->pool, id);
-	spin_unlock_irqrestore(&p->lock, flags);
-}
-EXPORT_SYMBOL(p9_idpool_put);
-
-/**
- * p9_idpool_check - check if the specified id is available
- * @id: id to check
- * @p: pool to check
- */
-
-int p9_idpool_check(int id, struct p9_idpool *p)
-{
-	return idr_find(&p->pool, id) != NULL;
-}
-EXPORT_SYMBOL(p9_idpool_check);
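net/9p/util.c goes away entirely: the p9_idpool wrapper was a locked IDR used for tag allocation, which the client now handles directly. For orientation, the deleted helper boils down to "allocate the lowest free id, release it later"; a userspace sketch of the same idea with a trivial bitmap (hypothetical, no locking):

#include <stdio.h>
#include <string.h>

#define POOL_SIZE 64
#define BITS_PER_WORD (8 * sizeof(unsigned long))

struct idpool {
	unsigned long used[POOL_SIZE / BITS_PER_WORD];
};

static int idpool_get(struct idpool *p)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		unsigned long bit = 1UL << (i % BITS_PER_WORD);
		unsigned long *w = &p->used[i / BITS_PER_WORD];

		if (!(*w & bit)) {
			*w |= bit;
			return i;	/* idr_alloc() in the kernel version */
		}
	}
	return -1;
}

static void idpool_put(struct idpool *p, int id)
{
	p->used[id / BITS_PER_WORD] &=
		~(1UL << (id % BITS_PER_WORD));	/* idr_remove() */
}

int main(void)
{
	struct idpool pool;
	int id;

	memset(&pool, 0, sizeof(pool));
	id = idpool_get(&pool);
	printf("got id %d\n", id);
	idpool_put(&pool, id);
	return 0;
}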
net/bridge/br_multicast.c
@@ -1428,8 +1428,7 @@ static void br_multicast_query_received(struct net_bridge *br,
 	 * is 0.0.0.0 should not be added to router port list.
 	 */
 	if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
-	    (saddr->proto == htons(ETH_P_IPV6) &&
-	     !ipv6_addr_any(&saddr->u.ip6)))
+	    saddr->proto == htons(ETH_P_IPV6))
 		br_multicast_mark_router(br, port);
 }
 
net/ceph/messenger.c
@@ -156,7 +156,6 @@ static bool con_flag_test_and_set(struct ceph_connection *con,
 /* Slab caches for frequently-allocated structures */
 
 static struct kmem_cache *ceph_msg_cache;
-static struct kmem_cache *ceph_msg_data_cache;
 
 /* static tag bytes (protocol control messages) */
 static char tag_msg = CEPH_MSGR_TAG_MSG;
@@ -235,23 +234,11 @@ static int ceph_msgr_slab_init(void)
 	if (!ceph_msg_cache)
 		return -ENOMEM;
 
-	BUG_ON(ceph_msg_data_cache);
-	ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0);
-	if (ceph_msg_data_cache)
-		return 0;
-
-	kmem_cache_destroy(ceph_msg_cache);
-	ceph_msg_cache = NULL;
-
-	return -ENOMEM;
+	return 0;
 }
 
 static void ceph_msgr_slab_exit(void)
 {
-	BUG_ON(!ceph_msg_data_cache);
-	kmem_cache_destroy(ceph_msg_data_cache);
-	ceph_msg_data_cache = NULL;
-
 	BUG_ON(!ceph_msg_cache);
 	kmem_cache_destroy(ceph_msg_cache);
 	ceph_msg_cache = NULL;
@@ -1141,16 +1128,13 @@ static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
 static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
 {
 	struct ceph_msg_data_cursor *cursor = &msg->cursor;
-	struct ceph_msg_data *data;
 
 	BUG_ON(!length);
 	BUG_ON(length > msg->data_length);
-	BUG_ON(list_empty(&msg->data));
+	BUG_ON(!msg->num_data_items);
 
-	cursor->data_head = &msg->data;
 	cursor->total_resid = length;
-	data = list_first_entry(&msg->data, struct ceph_msg_data, links);
-	cursor->data = data;
+	cursor->data = msg->data;
 
 	__ceph_msg_data_cursor_init(cursor);
 }
@@ -1231,8 +1215,7 @@ static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
 
 	if (!cursor->resid && cursor->total_resid) {
 		WARN_ON(!cursor->last_piece);
-		BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
-		cursor->data = list_next_entry(cursor->data, links);
+		cursor->data++;
 		__ceph_msg_data_cursor_init(cursor);
 		new_piece = true;
 	}
@@ -1248,9 +1231,6 @@ static size_t sizeof_footer(struct ceph_connection *con)
 
 static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
 {
-	BUG_ON(!msg);
-	BUG_ON(!data_len);
-
 	/* Initialize data cursor */
 
 	ceph_msg_data_cursor_init(msg, (size_t)data_len);
@@ -1590,7 +1570,7 @@ static int write_partial_message_data(struct ceph_connection *con)
 
 	dout("%s %p msg %p\n", __func__, con, msg);
 
-	if (list_empty(&msg->data))
+	if (!msg->num_data_items)
 		return -EINVAL;
 
 	/*
@@ -2347,8 +2327,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
 	u32 crc = 0;
 	int ret;
 
-	BUG_ON(!msg);
-	if (list_empty(&msg->data))
+	if (!msg->num_data_items)
 		return -EIO;
 
 	if (do_datacrc)
@@ -3256,32 +3235,16 @@ bool ceph_con_keepalive_expired(struct ceph_connection *con,
 	return false;
 }
 
-static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
+static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
 {
-	struct ceph_msg_data *data;
-
-	if (WARN_ON(!ceph_msg_data_type_valid(type)))
-		return NULL;
-
-	data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
-	if (!data)
-		return NULL;
-
-	data->type = type;
-	INIT_LIST_HEAD(&data->links);
-
-	return data;
+	BUG_ON(msg->num_data_items >= msg->max_data_items);
+	return &msg->data[msg->num_data_items++];
 }
 
 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
 {
-	if (!data)
-		return;
-
-	WARN_ON(!list_empty(&data->links));
 	if (data->type == CEPH_MSG_DATA_PAGELIST)
 		ceph_pagelist_release(data->pagelist);
-	kmem_cache_free(ceph_msg_data_cache, data);
 }
 
 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
@@ -3292,13 +3255,12 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
 	BUG_ON(!pages);
 	BUG_ON(!length);
 
-	data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
-	BUG_ON(!data);
+	data = ceph_msg_data_add(msg);
+	data->type = CEPH_MSG_DATA_PAGES;
 	data->pages = pages;
 	data->length = length;
 	data->alignment = alignment & ~PAGE_MASK;
 
-	list_add_tail(&data->links, &msg->data);
 	msg->data_length += length;
 }
 EXPORT_SYMBOL(ceph_msg_data_add_pages);
@@ -3311,11 +3273,11 @@ void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
 	BUG_ON(!pagelist);
 	BUG_ON(!pagelist->length);
 
-	data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
-	BUG_ON(!data);
+	data = ceph_msg_data_add(msg);
+	data->type = CEPH_MSG_DATA_PAGELIST;
+	refcount_inc(&pagelist->refcnt);
 	data->pagelist = pagelist;
 
-	list_add_tail(&data->links, &msg->data);
 	msg->data_length += pagelist->length;
 }
 EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
@@ -3326,12 +3288,11 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
 {
 	struct ceph_msg_data *data;
 
-	data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
-	BUG_ON(!data);
+	data = ceph_msg_data_add(msg);
+	data->type = CEPH_MSG_DATA_BIO;
 	data->bio_pos = *bio_pos;
 	data->bio_length = length;
 
-	list_add_tail(&data->links, &msg->data);
 	msg->data_length += length;
 }
 EXPORT_SYMBOL(ceph_msg_data_add_bio);
@@ -3342,11 +3303,10 @@ void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
 {
 	struct ceph_msg_data *data;
 
-	data = ceph_msg_data_create(CEPH_MSG_DATA_BVECS);
-	BUG_ON(!data);
+	data = ceph_msg_data_add(msg);
+	data->type = CEPH_MSG_DATA_BVECS;
 	data->bvec_pos = *bvec_pos;
 
-	list_add_tail(&data->links, &msg->data);
 	msg->data_length += bvec_pos->iter.bi_size;
 }
 EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
@@ -3355,8 +3315,8 @@ EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
-struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
-			      bool can_fail)
+struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
+			       gfp_t flags, bool can_fail)
 {
 	struct ceph_msg *m;
 
@@ -3370,7 +3330,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
 
 	INIT_LIST_HEAD(&m->list_head);
 	kref_init(&m->kref);
-	INIT_LIST_HEAD(&m->data);
 
 	/* front */
 	if (front_len) {
@@ -3385,6 +3344,15 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
 	}
 	m->front_alloc_len = m->front.iov_len = front_len;
 
+	if (max_data_items) {
+		m->data = kmalloc_array(max_data_items, sizeof(*m->data),
+					flags);
+		if (!m->data)
+			goto out2;
+
+		m->max_data_items = max_data_items;
+	}
+
 	dout("ceph_msg_new %p front %d\n", m, front_len);
 	return m;
 
@@ -3401,6 +3369,13 @@ out:
 	}
 	return NULL;
 }
+EXPORT_SYMBOL(ceph_msg_new2);
+
+struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
+			      bool can_fail)
+{
+	return ceph_msg_new2(type, front_len, 0, flags, can_fail);
+}
 EXPORT_SYMBOL(ceph_msg_new);
 
 /*
@@ -3496,13 +3471,14 @@ static void ceph_msg_free(struct ceph_msg *m)
 {
 	dout("%s %p\n", __func__, m);
 	kvfree(m->front.iov_base);
+	kfree(m->data);
 	kmem_cache_free(ceph_msg_cache, m);
 }
 
 static void ceph_msg_release(struct kref *kref)
 {
 	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
-	struct ceph_msg_data *data, *next;
+	int i;
 
 	dout("%s %p\n", __func__, m);
 	WARN_ON(!list_empty(&m->list_head));
@@ -3515,11 +3491,8 @@ static void ceph_msg_release(struct kref *kref)
 		m->middle = NULL;
 	}
 
-	list_for_each_entry_safe(data, next, &m->data, links) {
-		list_del_init(&data->links);
-		ceph_msg_data_destroy(data);
-	}
-	m->data_length = 0;
+	for (i = 0; i < m->num_data_items; i++)
+		ceph_msg_data_destroy(&m->data[i]);
 
 	if (m->pool)
 		ceph_msgpool_put(m->pool, m);
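The messenger rework above replaces a linked list of individually slab-allocated data items with one array sized by max_data_items at message creation, so attaching a data item is just claiming the next slot and the cursor advances by pointer increment instead of list_next_entry(). A compact userspace sketch of that pattern (types hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct data_item {
	size_t length;
};

struct msg {
	struct data_item *data;		/* array, allocated once at msg creation */
	int num_data_items;
	int max_data_items;
};

/* Counterpart of ceph_msg_data_add(): hand out the next free slot. */
static struct data_item *msg_data_add(struct msg *m)
{
	if (m->num_data_items >= m->max_data_items)
		return NULL;		/* the kernel version BUG_ON()s here */
	return &m->data[m->num_data_items++];
}

int main(void)
{
	struct msg m = { .max_data_items = 2 };
	struct data_item *d;

	m.data = calloc(m.max_data_items, sizeof(*m.data));
	d = msg_data_add(&m);
	d->length = 4096;

	/* Cursor advance is now plain pointer arithmetic, not list walking. */
	for (d = m.data; d != &m.data[m.num_data_items]; d++)
		printf("item of %zu bytes\n", d->length);

	free(m.data);
	return 0;
}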
net/ceph/msgpool.c
@@ -14,7 +14,8 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
 	struct ceph_msgpool *pool = arg;
 	struct ceph_msg *msg;
 
-	msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);
+	msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
+			    gfp_mask, true);
 	if (!msg) {
 		dout("msgpool_alloc %s failed\n", pool->name);
 	} else {
@@ -35,11 +36,13 @@ static void msgpool_free(void *element, void *arg)
 }
 
 int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
-		      int front_len, int size, bool blocking, const char *name)
+		      int front_len, int max_data_items, int size,
+		      const char *name)
 {
 	dout("msgpool %s init\n", name);
 	pool->type = type;
 	pool->front_len = front_len;
+	pool->max_data_items = max_data_items;
 	pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
 	if (!pool->pool)
 		return -ENOMEM;
@@ -53,18 +56,21 @@ void ceph_msgpool_destroy(struct ceph_msgpool *pool)
 	mempool_destroy(pool->pool);
 }
 
-struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
-				  int front_len)
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
+				  int max_data_items)
 {
 	struct ceph_msg *msg;
 
-	if (front_len > pool->front_len) {
-		dout("msgpool_get %s need front %d, pool size is %d\n",
-		     pool->name, front_len, pool->front_len);
-		WARN_ON(1);
+	if (front_len > pool->front_len ||
+	    max_data_items > pool->max_data_items) {
+		pr_warn_ratelimited("%s need %d/%d, pool %s has %d/%d\n",
+		    __func__, front_len, max_data_items, pool->name,
+		    pool->front_len, pool->max_data_items);
+		WARN_ON_ONCE(1);
 
 		/* try to alloc a fresh message */
-		return ceph_msg_new(pool->type, front_len, GFP_NOFS, false);
+		return ceph_msg_new2(pool->type, front_len, max_data_items,
+				     GFP_NOFS, false);
 	}
 
 	msg = mempool_alloc(pool->pool, GFP_NOFS);
@@ -80,6 +86,9 @@ void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
 	msg->front.iov_len = pool->front_len;
 	msg->hdr.front_len = cpu_to_le32(pool->front_len);
 
+	msg->data_length = 0;
+	msg->num_data_items = 0;
+
 	kref_init(&msg->kref); /* retake single ref */
 	mempool_free(msg, pool->pool);
 }
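Note that ceph_msgpool_put() now scrubs data_length and num_data_items on the way back into the pool, so a recycled message starts clean for the next borrower. A trivial sketch of that reset-on-return pooling habit (hypothetical fields):

#include <stdio.h>

struct msg {
	int num_data_items;
	size_t data_length;
};

/* On return to the pool, clear per-use state so the next borrower
 * starts from a clean message (mirrors ceph_msgpool_put()). */
static void msgpool_put(struct msg *m)
{
	m->data_length = 0;
	m->num_data_items = 0;
}

int main(void)
{
	struct msg m = { .num_data_items = 3, .data_length = 8192 };

	msgpool_put(&m);
	printf("recycled: items=%d len=%zu\n", m.num_data_items, m.data_length);
	return 0;
}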
@@ -126,6 +126,9 @@ static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
|
||||
osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Consumes @pages if @own_pages is true.
|
||||
*/
|
||||
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
|
||||
struct page **pages, u64 length, u32 alignment,
|
||||
bool pages_from_pool, bool own_pages)
|
||||
@@ -138,6 +141,9 @@ static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
|
||||
osd_data->own_pages = own_pages;
|
||||
}
|
||||
|
||||
/*
|
||||
* Consumes a ref on @pagelist.
|
||||
*/
|
||||
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
|
||||
struct ceph_pagelist *pagelist)
|
||||
{
|
||||
@@ -362,6 +368,8 @@ static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
|
||||
num_pages = calc_pages_for((u64)osd_data->alignment,
|
||||
(u64)osd_data->length);
|
||||
ceph_release_page_vector(osd_data->pages, num_pages);
|
||||
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
|
||||
ceph_pagelist_release(osd_data->pagelist);
|
||||
}
|
||||
ceph_osd_data_init(osd_data);
|
||||
}
|
||||
@@ -402,6 +410,9 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
|
||||
case CEPH_OSD_OP_LIST_WATCHERS:
|
||||
ceph_osd_data_release(&op->list_watchers.response_data);
|
||||
break;
|
||||
case CEPH_OSD_OP_COPY_FROM:
|
||||
ceph_osd_data_release(&op->copy_from.osd_data);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -606,12 +617,15 @@ static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
|
||||
return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
|
||||
}
|
||||
|
||||
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
|
||||
static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
|
||||
int num_request_data_items,
|
||||
int num_reply_data_items)
|
||||
{
|
||||
struct ceph_osd_client *osdc = req->r_osdc;
|
||||
struct ceph_msg *msg;
|
||||
int msg_size;
|
||||
|
||||
WARN_ON(req->r_request || req->r_reply);
|
||||
WARN_ON(ceph_oid_empty(&req->r_base_oid));
|
||||
WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
|
||||
|
||||
@@ -633,9 +647,11 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
|
||||
msg_size += 4 + 8; /* retry_attempt, features */
|
||||
|
||||
if (req->r_mempool)
|
||||
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
|
||||
msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
|
||||
num_request_data_items);
|
||||
else
|
||||
msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
|
||||
msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
|
||||
num_request_data_items, gfp, true);
|
||||
if (!msg)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -648,9 +664,11 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
|
||||
msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
|
||||
|
||||
if (req->r_mempool)
|
||||
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
|
||||
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
|
||||
num_reply_data_items);
|
||||
else
|
||||
msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
|
||||
msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
|
||||
num_reply_data_items, gfp, true);
|
||||
if (!msg)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -658,7 +676,6 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
|
||||
|
||||
static bool osd_req_opcode_valid(u16 opcode)
|
||||
{
|
||||
@@ -671,6 +688,65 @@ __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
|
||||
}
|
||||
}
|
||||
|
||||
static void get_num_data_items(struct ceph_osd_request *req,
|
||||
int *num_request_data_items,
|
||||
int *num_reply_data_items)
|
||||
{
|
||||
struct ceph_osd_req_op *op;
|
||||
|
||||
*num_request_data_items = 0;
|
||||
*num_reply_data_items = 0;
|
||||
|
||||
for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
|
||||
switch (op->op) {
|
||||
/* request */
|
||||
case CEPH_OSD_OP_WRITE:
|
||||
case CEPH_OSD_OP_WRITEFULL:
|
||||
case CEPH_OSD_OP_SETXATTR:
|
||||
case CEPH_OSD_OP_CMPXATTR:
|
||||
case CEPH_OSD_OP_NOTIFY_ACK:
|
||||
case CEPH_OSD_OP_COPY_FROM:
|
||||
*num_request_data_items += 1;
|
||||
break;
|
||||
|
||||
/* reply */
|
||||
case CEPH_OSD_OP_STAT:
|
||||
case CEPH_OSD_OP_READ:
|
||||
case CEPH_OSD_OP_LIST_WATCHERS:
|
||||
*num_reply_data_items += 1;
|
||||
break;
|
||||
|
||||
/* both */
|
||||
case CEPH_OSD_OP_NOTIFY:
|
||||
*num_request_data_items += 1;
|
||||
*num_reply_data_items += 1;
|
||||
break;
|
||||
case CEPH_OSD_OP_CALL:
|
||||
*num_request_data_items += 2;
|
||||
*num_reply_data_items += 1;
|
||||
break;
|
||||
|
||||
default:
|
||||
WARN_ON(!osd_req_opcode_valid(op->op));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* oid, oloc and OSD op opcode(s) must be filled in before this function
|
||||
* is called.
|
||||
*/
|
||||
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
|
||||
{
|
||||
int num_request_data_items, num_reply_data_items;
|
||||
|
||||
get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
|
||||
return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
|
||||
num_reply_data_items);
|
||||
}
|
||||
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
|
||||
|
||||
/*
|
||||
* This is an osd op init function for opcodes that have no data or
|
||||
* other information associated with them. It also serves as a
|
||||
@@ -767,22 +843,19 @@ void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
|
||||
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
|
||||
|
||||
int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
|
||||
u16 opcode, const char *class, const char *method)
|
||||
const char *class, const char *method)
|
||||
{
|
||||
struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
|
||||
opcode, 0);
|
||||
struct ceph_osd_req_op *op;
|
||||
struct ceph_pagelist *pagelist;
|
||||
size_t payload_len = 0;
|
||||
size_t size;
|
||||
|
||||
BUG_ON(opcode != CEPH_OSD_OP_CALL);
|
||||
op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
|
||||
|
||||
pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
|
||||
pagelist = ceph_pagelist_alloc(GFP_NOFS);
|
||||
if (!pagelist)
|
||||
return -ENOMEM;
|
||||
|
||||
ceph_pagelist_init(pagelist);
|
||||
|
||||
op->cls.class_name = class;
|
||||
size = strlen(class);
|
||||
BUG_ON(size > (size_t) U8_MAX);
|
||||
@@ -815,12 +888,10 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
|
||||
|
||||
BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
|
||||
|
||||
pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
|
||||
pagelist = ceph_pagelist_alloc(GFP_NOFS);
|
||||
if (!pagelist)
|
||||
return -ENOMEM;
|
||||
|
||||
ceph_pagelist_init(pagelist);
|
||||
|
||||
payload_len = strlen(name);
|
||||
op->xattr.name_len = payload_len;
|
||||
ceph_pagelist_append(pagelist, name, payload_len);
|
||||
@@ -900,12 +971,6 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
|
||||
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
|
||||
const struct ceph_osd_req_op *src)
|
||||
{
|
||||
if (WARN_ON(!osd_req_opcode_valid(src->op))) {
|
||||
pr_err("unrecognized osd opcode %d\n", src->op);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
switch (src->op) {
|
||||
case CEPH_OSD_OP_STAT:
|
||||
break;
|
||||
@@ -955,6 +1020,14 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
|
||||
case CEPH_OSD_OP_CREATE:
|
||||
case CEPH_OSD_OP_DELETE:
|
||||
break;
|
||||
case CEPH_OSD_OP_COPY_FROM:
|
||||
dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
|
||||
dst->copy_from.src_version =
|
||||
cpu_to_le64(src->copy_from.src_version);
|
||||
dst->copy_from.flags = src->copy_from.flags;
|
||||
dst->copy_from.src_fadvise_flags =
|
||||
cpu_to_le32(src->copy_from.src_fadvise_flags);
|
||||
break;
|
||||
default:
|
||||
pr_err("unsupported osd opcode %s\n",
|
||||
ceph_osd_op_name(src->op));
|
||||
@@ -1038,7 +1111,15 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
|
||||
if (flags & CEPH_OSD_FLAG_WRITE)
|
||||
req->r_data_offset = off;
|
||||
|
||||
r = ceph_osdc_alloc_messages(req, GFP_NOFS);
|
||||
if (num_ops > 1)
|
||||
/*
|
||||
* This is a special case for ceph_writepages_start(), but it
|
||||
* also covers ceph_uninline_data(). If more multi-op request
|
||||
* use cases emerge, we will need a separate helper.
|
||||
*/
|
||||
r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
|
||||
else
|
||||
r = ceph_osdc_alloc_messages(req, GFP_NOFS);
|
||||
if (r)
|
||||
goto fail;
|
||||
|
||||
@@ -1845,48 +1926,55 @@ static bool should_plug_request(struct ceph_osd_request *req)
|
||||
return true;
|
||||
}
|
||||
|
||||
static void setup_request_data(struct ceph_osd_request *req,
|
||||
struct ceph_msg *msg)
|
||||
/*
|
||||
* Keep get_num_data_items() in sync with this function.
|
||||
*/
|
||||
static void setup_request_data(struct ceph_osd_request *req)
|
||||
{
|
||||
u32 data_len = 0;
|
||||
int i;
|
||||
struct ceph_msg *request_msg = req->r_request;
|
||||
struct ceph_msg *reply_msg = req->r_reply;
|
||||
struct ceph_osd_req_op *op;
|
||||
|
||||
if (!list_empty(&msg->data))
|
||||
if (req->r_request->num_data_items || req->r_reply->num_data_items)
|
||||
return;
|
||||
|
||||
WARN_ON(msg->data_length);
|
||||
for (i = 0; i < req->r_num_ops; i++) {
|
||||
struct ceph_osd_req_op *op = &req->r_ops[i];
|
||||
|
||||
WARN_ON(request_msg->data_length || reply_msg->data_length);
|
||||
for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
|
||||
switch (op->op) {
|
||||
/* request */
|
||||
case CEPH_OSD_OP_WRITE:
|
||||
case CEPH_OSD_OP_WRITEFULL:
|
||||
WARN_ON(op->indata_len != op->extent.length);
|
||||
ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
|
||||
ceph_osdc_msg_data_add(request_msg,
|
||||
&op->extent.osd_data);
|
||||
break;
|
||||
case CEPH_OSD_OP_SETXATTR:
|
||||
case CEPH_OSD_OP_CMPXATTR:
|
||||
WARN_ON(op->indata_len != op->xattr.name_len +
|
||||
op->xattr.value_len);
|
||||
ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
|
||||
ceph_osdc_msg_data_add(request_msg,
|
||||
&op->xattr.osd_data);
|
||||
break;
|
||||
case CEPH_OSD_OP_NOTIFY_ACK:
|
||||
ceph_osdc_msg_data_add(msg,
|
||||
ceph_osdc_msg_data_add(request_msg,
|
||||
&op->notify_ack.request_data);
|
||||
break;
|
||||
case CEPH_OSD_OP_COPY_FROM:
|
||||
ceph_osdc_msg_data_add(request_msg,
|
||||
&op->copy_from.osd_data);
|
||||
break;
|
||||
|
||||
/* reply */
|
||||
case CEPH_OSD_OP_STAT:
|
||||
ceph_osdc_msg_data_add(req->r_reply,
|
||||
ceph_osdc_msg_data_add(reply_msg,
|
||||
&op->raw_data_in);
|
||||
break;
|
||||
case CEPH_OSD_OP_READ:
|
||||
ceph_osdc_msg_data_add(req->r_reply,
|
||||
ceph_osdc_msg_data_add(reply_msg,
|
||||
&op->extent.osd_data);
|
||||
break;
|
||||
case CEPH_OSD_OP_LIST_WATCHERS:
|
||||
ceph_osdc_msg_data_add(req->r_reply,
|
||||
ceph_osdc_msg_data_add(reply_msg,
|
||||
&op->list_watchers.response_data);
|
||||
break;
|
||||
|
||||
@@ -1895,25 +1983,23 @@ static void setup_request_data(struct ceph_osd_request *req,
|
||||
WARN_ON(op->indata_len != op->cls.class_len +
|
||||
op->cls.method_len +
|
||||
op->cls.indata_len);
|
||||
ceph_osdc_msg_data_add(msg, &op->cls.request_info);
|
||||
ceph_osdc_msg_data_add(request_msg,
|
||||
&op->cls.request_info);
|
||||
/* optional, can be NONE */
|
||||
ceph_osdc_msg_data_add(msg, &op->cls.request_data);
|
||||
ceph_osdc_msg_data_add(request_msg,
|
||||
&op->cls.request_data);
|
||||
/* optional, can be NONE */
|
||||
ceph_osdc_msg_data_add(req->r_reply,
|
||||
ceph_osdc_msg_data_add(reply_msg,
|
||||
&op->cls.response_data);
|
||||
break;
|
||||
case CEPH_OSD_OP_NOTIFY:
|
||||
ceph_osdc_msg_data_add(msg,
|
||||
ceph_osdc_msg_data_add(request_msg,
|
||||
&op->notify.request_data);
|
||||
ceph_osdc_msg_data_add(req->r_reply,
|
||||
ceph_osdc_msg_data_add(reply_msg,
|
||||
&op->notify.response_data);
|
||||
break;
|
||||
}
|
||||
|
||||
data_len += op->indata_len;
|
||||
}
|
||||
|
||||
WARN_ON(data_len != msg->data_length);
|
||||
}
|
||||
|
||||
static void encode_pgid(void **p, const struct ceph_pg *pgid)
|
||||
@@ -1961,7 +2047,7 @@ static void encode_request_partial(struct ceph_osd_request *req,
|
||||
req->r_data_offset || req->r_snapc);
|
||||
}
|
||||
|
||||
setup_request_data(req, msg);
|
||||
setup_request_data(req);
|
||||
|
||||
encode_spgid(&p, &req->r_t.spgid); /* actual spg */
|
||||
ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
|
||||
@@ -3001,11 +3087,21 @@ static void linger_submit(struct ceph_osd_linger_request *lreq)
|
||||
struct ceph_osd_client *osdc = lreq->osdc;
|
||||
struct ceph_osd *osd;
|
||||
|
||||
down_write(&osdc->lock);
|
||||
linger_register(lreq);
|
||||
if (lreq->is_watch) {
|
||||
lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
|
||||
lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
|
||||
} else {
|
||||
lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
|
||||
}
|
||||
|
||||
calc_target(osdc, &lreq->t, NULL, false);
|
||||
osd = lookup_create_osd(osdc, lreq->t.osd, true);
|
||||
link_linger(osd, lreq);
|
||||
|
||||
send_linger(lreq);
|
||||
up_write(&osdc->lock);
|
||||
}
|
||||
|
||||
static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
|
||||
@@ -4318,9 +4414,7 @@ static void handle_watch_notify(struct ceph_osd_client *osdc,
|
||||
lreq->notify_id, notify_id);
|
||||
} else if (!completion_done(&lreq->notify_finish_wait)) {
|
||||
struct ceph_msg_data *data =
|
||||
list_first_entry_or_null(&msg->data,
|
||||
struct ceph_msg_data,
|
||||
links);
|
||||
msg->num_data_items ? &msg->data[0] : NULL;
|
||||
|
||||
if (data) {
|
||||
if (lreq->preply_pages) {
|
||||
@@ -4476,6 +4570,23 @@ alloc_linger_request(struct ceph_osd_linger_request *lreq)
|
||||
|
||||
ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
|
||||
ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
|
||||
return req;
|
||||
}
|
||||
|
||||
static struct ceph_osd_request *
|
||||
alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
|
||||
{
|
||||
struct ceph_osd_request *req;
|
||||
|
||||
req = alloc_linger_request(lreq);
|
||||
if (!req)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Pass 0 for cookie because we don't know it yet, it will be
|
||||
* filled in by linger_submit().
|
||||
*/
|
||||
osd_req_op_watch_init(req, 0, 0, watch_opcode);
|
||||
|
||||
if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
|
||||
ceph_osdc_put_request(req);
|
||||
@@ -4514,27 +4625,19 @@ ceph_osdc_watch(struct ceph_osd_client *osdc,
|
||||
lreq->t.flags = CEPH_OSD_FLAG_WRITE;
|
||||
ktime_get_real_ts64(&lreq->mtime);
|
||||
|
||||
lreq->reg_req = alloc_linger_request(lreq);
|
||||
lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
|
||||
if (!lreq->reg_req) {
|
||||
ret = -ENOMEM;
|
||||
goto err_put_lreq;
|
||||
}
|
||||
|
||||
lreq->ping_req = alloc_linger_request(lreq);
|
||||
lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
|
||||
if (!lreq->ping_req) {
|
||||
ret = -ENOMEM;
|
||||
goto err_put_lreq;
|
||||
}
|
||||
|
||||
down_write(&osdc->lock);
|
||||
linger_register(lreq); /* before osd_req_op_* */
|
||||
osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
|
||||
CEPH_OSD_WATCH_OP_WATCH);
|
||||
osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
|
||||
CEPH_OSD_WATCH_OP_PING);
|
||||
linger_submit(lreq);
|
||||
up_write(&osdc->lock);
|
||||
|
||||
ret = linger_reg_commit_wait(lreq);
|
||||
if (ret) {
|
||||
linger_cancel(lreq);
|
||||
@@ -4599,11 +4702,10 @@ static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
|
||||
|
||||
op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
|
||||
|
||||
pl = kmalloc(sizeof(*pl), GFP_NOIO);
|
||||
pl = ceph_pagelist_alloc(GFP_NOIO);
|
||||
if (!pl)
|
||||
return -ENOMEM;
|
||||
|
||||
ceph_pagelist_init(pl);
|
||||
ret = ceph_pagelist_encode_64(pl, notify_id);
|
||||
ret |= ceph_pagelist_encode_64(pl, cookie);
|
||||
if (payload) {
|
||||
@@ -4641,12 +4743,12 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
|
||||
ceph_oloc_copy(&req->r_base_oloc, oloc);
|
||||
req->r_flags = CEPH_OSD_FLAG_READ;
|
||||
|
||||
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
|
||||
ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
|
||||
payload_len);
|
||||
if (ret)
|
||||
goto out_put_req;
|
||||
|
||||
ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
|
||||
payload_len);
|
||||
ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
|
||||
if (ret)
|
||||
goto out_put_req;
|
||||
|
||||
@@ -4670,11 +4772,10 @@ static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
|
||||
op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
|
||||
op->notify.cookie = cookie;
|
||||
|
||||
pl = kmalloc(sizeof(*pl), GFP_NOIO);
|
||||
pl = ceph_pagelist_alloc(GFP_NOIO);
|
||||
if (!pl)
|
||||
return -ENOMEM;
|
||||
|
||||
ceph_pagelist_init(pl);
|
||||
ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
|
||||
ret |= ceph_pagelist_encode_32(pl, timeout);
|
||||
ret |= ceph_pagelist_encode_32(pl, payload_len);
|
||||
@@ -4733,29 +4834,30 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
|
||||
goto out_put_lreq;
|
||||
}
|
||||
|
||||
/*
|
||||
* Pass 0 for cookie because we don't know it yet, it will be
|
||||
* filled in by linger_submit().
|
||||
*/
|
||||
ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
|
||||
payload, payload_len);
|
||||
if (ret)
|
||||
goto out_put_lreq;
|
||||
|
||||
/* for notify_id */
|
||||
pages = ceph_alloc_page_vector(1, GFP_NOIO);
|
||||
if (IS_ERR(pages)) {
|
||||
ret = PTR_ERR(pages);
|
||||
goto out_put_lreq;
|
||||
}
|
||||
|
||||
down_write(&osdc->lock);
|
||||
linger_register(lreq); /* before osd_req_op_* */
|
||||
ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
|
||||
timeout, payload, payload_len);
|
||||
if (ret) {
|
||||
linger_unregister(lreq);
|
||||
up_write(&osdc->lock);
|
||||
ceph_release_page_vector(pages, 1);
|
||||
goto out_put_lreq;
|
||||
}
|
||||
ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
|
||||
response_data),
|
||||
pages, PAGE_SIZE, 0, false, true);
|
||||
linger_submit(lreq);
|
||||
up_write(&osdc->lock);

ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
if (ret)
goto out_put_lreq;

linger_submit(lreq);
ret = linger_reg_commit_wait(lreq);
if (!ret)
ret = linger_notify_finish_wait(lreq);
@@ -4881,10 +4983,6 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = CEPH_OSD_FLAG_READ;

ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;

pages = ceph_alloc_page_vector(1, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
@@ -4896,6 +4994,10 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
response_data),
pages, PAGE_SIZE, 0, false, true);

ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;

ceph_osdc_start_request(osdc, req, false);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
@@ -4958,11 +5060,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
ceph_oloc_copy(&req->r_base_oloc, oloc);
req->r_flags = flags;

ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;

ret = osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
ret = osd_req_op_cls_init(req, 0, class, method);
if (ret)
goto out_put_req;

@@ -4973,6 +5071,10 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
osd_req_op_cls_response_data_pages(req, 0, &resp_page,
*resp_len, 0, false, false);

ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
if (ret)
goto out_put_req;

ceph_osdc_start_request(osdc, req, false);
ret = ceph_osdc_wait_request(osdc, req);
if (ret >= 0) {
@@ -5021,11 +5123,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
goto out_map;

err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
PAGE_SIZE, 10, true, "osd_op");
PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
if (err < 0)
goto out_mempool;
err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
PAGE_SIZE, 10, true, "osd_op_reply");
PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
"osd_op_reply");
if (err < 0)
goto out_msgpool;

@@ -5168,6 +5271,80 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
}
EXPORT_SYMBOL(ceph_osdc_writepages);

static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
u64 src_snapid, u64 src_version,
struct ceph_object_id *src_oid,
struct ceph_object_locator *src_oloc,
u32 src_fadvise_flags,
u32 dst_fadvise_flags,
u8 copy_from_flags)
{
struct ceph_osd_req_op *op;
struct page **pages;
void *p, *end;

pages = ceph_alloc_page_vector(1, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);

op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
op->copy_from.snapid = src_snapid;
op->copy_from.src_version = src_version;
op->copy_from.flags = copy_from_flags;
op->copy_from.src_fadvise_flags = src_fadvise_flags;

p = page_address(pages[0]);
end = p + PAGE_SIZE;
ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
encode_oloc(&p, end, src_oloc);
op->indata_len = PAGE_SIZE - (end - p);

ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
op->indata_len, 0, false, true);
return 0;
}

int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
u64 src_snapid, u64 src_version,
struct ceph_object_id *src_oid,
struct ceph_object_locator *src_oloc,
u32 src_fadvise_flags,
struct ceph_object_id *dst_oid,
struct ceph_object_locator *dst_oloc,
u32 dst_fadvise_flags,
u8 copy_from_flags)
{
struct ceph_osd_request *req;
int ret;

req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
if (!req)
return -ENOMEM;

req->r_flags = CEPH_OSD_FLAG_WRITE;

ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
ceph_oid_copy(&req->r_t.base_oid, dst_oid);

ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
src_oloc, src_fadvise_flags,
dst_fadvise_flags, copy_from_flags);
if (ret)
goto out;

ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
if (ret)
goto out;

ceph_osdc_start_request(osdc, req, false);
ret = ceph_osdc_wait_request(osdc, req);

out:
ceph_osdc_put_request(req);
return ret;
}
EXPORT_SYMBOL(ceph_osdc_copy_from);

int __init ceph_osdc_setup(void)
{
size_t size = sizeof(struct ceph_osd_request) +
@@ -5295,7 +5472,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
u32 front_len = le32_to_cpu(hdr->front_len);
u32 data_len = le32_to_cpu(hdr->data_len);

m = ceph_msg_new(type, front_len, GFP_NOIO, false);
m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
if (!m)
return NULL;

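/*
 * Illustrative caller for the new export above -- a sketch, not part of the
 * commit. It assumes the caller has already resolved both object ids and
 * locators; copy_one_object() and its parameters are invented names, only
 * ceph_osdc_copy_from() itself comes from this change.
 */
static int copy_one_object(struct ceph_osd_client *osdc,
			   u64 src_snapid, u64 src_version,
			   struct ceph_object_id *src_oid,
			   struct ceph_object_locator *src_oloc,
			   struct ceph_object_id *dst_oid,
			   struct ceph_object_locator *dst_oloc)
{
	/* Synchronous: allocates, submits and waits for the write commit. */
	return ceph_osdc_copy_from(osdc, src_snapid, src_version,
				   src_oid, src_oloc, 0 /* src fadvise */,
				   dst_oid, dst_oloc, 0 /* dst fadvise */,
				   0 /* copy_from flags */);
}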
@@ -6,6 +6,26 @@
#include <linux/highmem.h>
#include <linux/ceph/pagelist.h>

struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags)
{
struct ceph_pagelist *pl;

pl = kmalloc(sizeof(*pl), gfp_flags);
if (!pl)
return NULL;

INIT_LIST_HEAD(&pl->head);
pl->mapped_tail = NULL;
pl->length = 0;
pl->room = 0;
INIT_LIST_HEAD(&pl->free_list);
pl->num_pages_free = 0;
refcount_set(&pl->refcnt, 1);

return pl;
}
EXPORT_SYMBOL(ceph_pagelist_alloc);

static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
{
if (pl->mapped_tail) {
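/*
 * Usage sketch for ceph_pagelist_alloc() -- illustrative, not from the
 * commit. The point of the helper is that allocation and initialization
 * (including the initial refcount) happen in one step instead of a bare
 * kmalloc() followed by open-coded field setup; build_payload() is an
 * invented name, the other pagelist calls are pre-existing API.
 */
static struct ceph_pagelist *build_payload(const void *buf, size_t len)
{
	struct ceph_pagelist *pl;

	pl = ceph_pagelist_alloc(GFP_NOFS);
	if (!pl)
		return NULL;
	if (ceph_pagelist_append(pl, buf, len)) {
		ceph_pagelist_release(pl);	/* drops the initial ref */
		return NULL;
	}
	return pl;
}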
@@ -5457,7 +5457,7 @@ static void gro_flush_oldest(struct list_head *head)
/* Do not adjust napi->gro_hash[].count, caller is adding a new
* SKB to the chain.
*/
list_del(&oldest->list);
skb_list_del_init(oldest);
napi_gro_complete(oldest);
}

@@ -5264,8 +5264,6 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_msg_pull_data_proto;
case BPF_FUNC_msg_push_data:
return &bpf_msg_push_data_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -5296,8 +5294,6 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_redirect_map_proto;
case BPF_FUNC_sk_redirect_hash:
return &bpf_sk_redirect_hash_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
#ifdef CONFIG_INET
case BPF_FUNC_sk_lookup_tcp:
return &bpf_sk_lookup_tcp_proto;
@@ -5496,7 +5492,13 @@ static bool cg_skb_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, flow_keys):
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
if (!capable(CAP_SYS_ADMIN))
return false;
break;
}

if (type == BPF_WRITE) {
switch (off) {
case bpf_ctx_range(struct __sk_buff, mark):
@@ -5638,6 +5640,15 @@ static bool sock_filter_is_valid_access(int off, int size,
prog->expected_attach_type);
}

static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog)
{
/* Neither direct read nor direct write requires any preliminary
* action.
*/
return 0;
}

static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog, int drop_verdict)
{
@@ -7204,6 +7215,7 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
.get_func_proto = xdp_func_proto,
.is_valid_access = xdp_is_valid_access,
.convert_ctx_access = xdp_convert_ctx_access,
.gen_prologue = bpf_noop_prologue,
};

const struct bpf_prog_ops xdp_prog_ops = {
@@ -7302,6 +7314,7 @@ const struct bpf_verifier_ops sk_msg_verifier_ops = {
.get_func_proto = sk_msg_func_proto,
.is_valid_access = sk_msg_is_valid_access,
.convert_ctx_access = sk_msg_convert_ctx_access,
.gen_prologue = bpf_noop_prologue,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
@@ -279,7 +279,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
return ret;
}

# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -290,7 +289,6 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,

return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
# endif
#endif

static struct ctl_table net_core_table[] = {
@@ -397,6 +395,14 @@ static struct ctl_table net_core_table[] = {
.extra2 = &one,
},
# endif
{
.procname = "bpf_jit_limit",
.data = &bpf_jit_limit,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = proc_dointvec_minmax_bpf_restricted,
.extra1 = &one,
},
#endif
{
.procname = "netdev_tstamp_prequeue",
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>

@@ -262,7 +262,7 @@
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>

@@ -81,7 +81,7 @@

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>

@@ -42,6 +42,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,

rcu_read_lock();
if (req->sdiag_family == AF_INET)
/* src and dst are swapped for historical reasons */
sk = __udp4_lib_lookup(net,
req->id.idiag_src[0], req->id.idiag_sport,
req->id.idiag_dst[0], req->id.idiag_dport,

@@ -413,7 +413,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
if (tb[TCA_GRED_LIMIT] != NULL)
sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
return gred_change_table_def(sch, opt);
return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

if (tb[TCA_GRED_PARMS] == NULL ||

@@ -46,7 +46,7 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>
@@ -76,6 +76,7 @@ struct rsi {
struct xdr_netobj in_handle, in_token;
struct xdr_netobj out_handle, out_token;
int major_status, minor_status;
struct rcu_head rcu_head;
};

static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
@@ -89,11 +90,19 @@ static void rsi_free(struct rsi *rsii)
kfree(rsii->out_token.data);
}

static void rsi_free_rcu(struct rcu_head *head)
{
struct rsi *rsii = container_of(head, struct rsi, rcu_head);

rsi_free(rsii);
kfree(rsii);
}

static void rsi_put(struct kref *ref)
{
struct rsi *rsii = container_of(ref, struct rsi, h.ref);
rsi_free(rsii);
kfree(rsii);

call_rcu(&rsii->rcu_head, rsi_free_rcu);
}

static inline int rsi_hash(struct rsi *item)
@@ -282,7 +291,7 @@ static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
struct cache_head *ch;
int hash = rsi_hash(item);

ch = sunrpc_cache_lookup(cd, &item->h, hash);
ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
if (ch)
return container_of(ch, struct rsi, h);
else
@@ -330,6 +339,7 @@ struct rsc {
struct svc_cred cred;
struct gss_svc_seq_data seqdata;
struct gss_ctx *mechctx;
struct rcu_head rcu_head;
};

static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
@@ -343,12 +353,22 @@ static void rsc_free(struct rsc *rsci)
free_svc_cred(&rsci->cred);
}

static void rsc_free_rcu(struct rcu_head *head)
{
struct rsc *rsci = container_of(head, struct rsc, rcu_head);

kfree(rsci->handle.data);
kfree(rsci);
}

static void rsc_put(struct kref *ref)
{
struct rsc *rsci = container_of(ref, struct rsc, h.ref);

rsc_free(rsci);
kfree(rsci);
if (rsci->mechctx)
gss_delete_sec_context(&rsci->mechctx);
free_svc_cred(&rsci->cred);
call_rcu(&rsci->rcu_head, rsc_free_rcu);
}

static inline int
@@ -542,7 +562,7 @@ static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
struct cache_head *ch;
int hash = rsc_hash(item);

ch = sunrpc_cache_lookup(cd, &item->h, hash);
ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
if (ch)
return container_of(ch, struct rsc, h);
else
@@ -1764,14 +1784,21 @@ out_err:
}

static void
svcauth_gss_domain_release(struct auth_domain *dom)
svcauth_gss_domain_release_rcu(struct rcu_head *head)
{
struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
struct gss_domain *gd = container_of(dom, struct gss_domain, h);

kfree(dom->name);
kfree(gd);
}

static void
svcauth_gss_domain_release(struct auth_domain *dom)
{
call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
}

static struct auth_ops svcauthops_gss = {
.name = "rpcsec_gss",
.owner = THIS_MODULE,
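/*
 * The rsi/rsc/gss_domain changes above all follow one pattern: the kref
 * release callback stops freeing the object directly and instead defers the
 * free through call_rcu(), so lockless readers that raced with the final
 * put never dereference freed memory. Generic shape of the pattern (struct
 * foo and the function names are illustrative, not from the commit):
 */
struct foo {
	struct kref ref;
	struct rcu_head rcu_head;
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu_head);

	kfree(f);	/* runs after a grace period; no reader can still see f */
}

static void foo_put(struct kref *ref)
{
	struct foo *f = container_of(ref, struct foo, ref);

	call_rcu(&f->rcu_head, foo_free_rcu);
}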
@@ -54,28 +54,33 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash)
static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
struct cache_head *key,
int hash)
{
struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
struct hlist_head *head;
struct hlist_head *head = &detail->hash_table[hash];
struct cache_head *tmp;

head = &detail->hash_table[hash];

read_lock(&detail->hash_lock);

hlist_for_each_entry(tmp, head, cache_list) {
rcu_read_lock();
hlist_for_each_entry_rcu(tmp, head, cache_list) {
if (detail->match(tmp, key)) {
if (cache_is_expired(detail, tmp))
/* This entry is expired, we will discard it. */
break;
cache_get(tmp);
read_unlock(&detail->hash_lock);
continue;
tmp = cache_get_rcu(tmp);
rcu_read_unlock();
return tmp;
}
}
read_unlock(&detail->hash_lock);
/* Didn't find anything, insert an empty entry */
rcu_read_unlock();
return NULL;
}

static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
struct cache_head *key,
int hash)
{
struct cache_head *new, *tmp, *freeme = NULL;
struct hlist_head *head = &detail->hash_table[hash];

new = detail->alloc();
if (!new)
@@ -87,35 +92,46 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
cache_init(new, detail);
detail->init(new, key);

write_lock(&detail->hash_lock);
spin_lock(&detail->hash_lock);

/* check if entry appeared while we slept */
hlist_for_each_entry(tmp, head, cache_list) {
hlist_for_each_entry_rcu(tmp, head, cache_list) {
if (detail->match(tmp, key)) {
if (cache_is_expired(detail, tmp)) {
hlist_del_init(&tmp->cache_list);
hlist_del_init_rcu(&tmp->cache_list);
detail->entries --;
freeme = tmp;
break;
}
cache_get(tmp);
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);
cache_put(new, detail);
return tmp;
}
}

hlist_add_head(&new->cache_list, head);
hlist_add_head_rcu(&new->cache_list, head);
detail->entries++;
cache_get(new);
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);

if (freeme)
cache_put(freeme, detail);
return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
struct cache_head *key, int hash)
{
struct cache_head *ret;

ret = sunrpc_cache_find_rcu(detail, key, hash);
if (ret)
return ret;
/* Didn't find anything, insert an empty entry */
return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

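/*
 * cache_get_rcu() is used by sunrpc_cache_find_rcu() above but defined
 * elsewhere; under RCU it must refuse to resurrect an entry whose last
 * reference is already gone, which is exactly what kref_get_unless_zero()
 * provides. A plausible sketch of the helper (an assumption, not part of
 * this hunk):
 */
static inline struct cache_head *cache_get_rcu(struct cache_head *h)
{
	if (kref_get_unless_zero(&h->ref))
		return h;	/* still live, reference taken */
	return NULL;		/* lost the race with the final put */
}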
@@ -151,18 +167,18 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *tmp;

if (!test_bit(CACHE_VALID, &old->flags)) {
write_lock(&detail->hash_lock);
spin_lock(&detail->hash_lock);
if (!test_bit(CACHE_VALID, &old->flags)) {
if (test_bit(CACHE_NEGATIVE, &new->flags))
set_bit(CACHE_NEGATIVE, &old->flags);
else
detail->update(old, new);
cache_fresh_locked(old, new->expiry_time, detail);
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(old, detail);
return old;
}
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);
}
/* We need to insert a new entry */
tmp = detail->alloc();
@@ -173,7 +189,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
cache_init(tmp, detail);
detail->init(tmp, old);

write_lock(&detail->hash_lock);
spin_lock(&detail->hash_lock);
if (test_bit(CACHE_NEGATIVE, &new->flags))
set_bit(CACHE_NEGATIVE, &tmp->flags);
else
@@ -183,7 +199,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
cache_get(tmp);
cache_fresh_locked(tmp, new->expiry_time, detail);
cache_fresh_locked(old, 0, detail);
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(tmp, detail);
cache_fresh_unlocked(old, detail);
cache_put(old, detail);
@@ -223,7 +239,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
{
int rv;

write_lock(&detail->hash_lock);
spin_lock(&detail->hash_lock);
rv = cache_is_valid(h);
if (rv == -EAGAIN) {
set_bit(CACHE_NEGATIVE, &h->flags);
@@ -231,7 +247,7 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
detail);
rv = -ENOENT;
}
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(h, detail);
return rv;
}
@@ -341,7 +357,7 @@ static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
rwlock_init(&cd->hash_lock);
spin_lock_init(&cd->hash_lock);
INIT_LIST_HEAD(&cd->queue);
spin_lock(&cache_list_lock);
cd->nextcheck = 0;
@@ -361,11 +377,11 @@ void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
cache_purge(cd);
spin_lock(&cache_list_lock);
write_lock(&cd->hash_lock);
spin_lock(&cd->hash_lock);
if (current_detail == cd)
current_detail = NULL;
list_del_init(&cd->others);
write_unlock(&cd->hash_lock);
spin_unlock(&cd->hash_lock);
spin_unlock(&cache_list_lock);
if (list_empty(&cache_list)) {
/* module must be being unloaded so its safe to kill the worker */
@@ -422,7 +438,7 @@ static int cache_clean(void)
struct hlist_head *head;
struct hlist_node *tmp;

write_lock(&current_detail->hash_lock);
spin_lock(&current_detail->hash_lock);

/* Ok, now to clean this strand */

@@ -433,13 +449,13 @@ static int cache_clean(void)
if (!cache_is_expired(current_detail, ch))
continue;

hlist_del_init(&ch->cache_list);
hlist_del_init_rcu(&ch->cache_list);
current_detail->entries--;
rv = 1;
break;
}

write_unlock(&current_detail->hash_lock);
spin_unlock(&current_detail->hash_lock);
d = current_detail;
if (!ch)
current_index ++;
@@ -494,9 +510,9 @@ void cache_purge(struct cache_detail *detail)
struct hlist_node *tmp = NULL;
int i = 0;

write_lock(&detail->hash_lock);
spin_lock(&detail->hash_lock);
if (!detail->entries) {
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);
return;
}

@@ -504,17 +520,17 @@ void cache_purge(struct cache_detail *detail)
for (i = 0; i < detail->hash_size; i++) {
head = &detail->hash_table[i];
hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
hlist_del_init(&ch->cache_list);
hlist_del_init_rcu(&ch->cache_list);
detail->entries--;

set_bit(CACHE_CLEANED, &ch->flags);
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);
cache_fresh_unlocked(ch, detail);
cache_put(ch, detail);
write_lock(&detail->hash_lock);
spin_lock(&detail->hash_lock);
}
}
write_unlock(&detail->hash_lock);
spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);

@@ -1289,21 +1305,19 @@ EXPORT_SYMBOL_GPL(qword_get);
* get a header, then pass each real item in the cache
*/

void *cache_seq_start(struct seq_file *m, loff_t *pos)
__acquires(cd->hash_lock)
static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;
unsigned int hash, entry;
struct cache_head *ch;
struct cache_detail *cd = m->private;

read_lock(&cd->hash_lock);
if (!n--)
return SEQ_START_TOKEN;
hash = n >> 32;
entry = n & ((1LL<<32) - 1);

hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
if (!entry--)
return ch;
n &= ~((1LL<<32) - 1);
@@ -1315,12 +1329,12 @@ void *cache_seq_start(struct seq_file *m, loff_t *pos)
if (hash >= cd->hash_size)
return NULL;
*pos = n+1;
return hlist_entry_safe(cd->hash_table[hash].first,
return hlist_entry_safe(rcu_dereference_raw(
hlist_first_rcu(&cd->hash_table[hash])),
struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_start);

void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
struct cache_head *ch = p;
int hash = (*pos >> 32);
@@ -1333,7 +1347,8 @@ void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
*pos += 1LL<<32;
} else {
++*pos;
return hlist_entry_safe(ch->cache_list.next,
return hlist_entry_safe(rcu_dereference_raw(
hlist_next_rcu(&ch->cache_list)),
struct cache_head, cache_list);
}
*pos &= ~((1LL<<32) - 1);
@@ -1345,18 +1360,32 @@ void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
if (hash >= cd->hash_size)
return NULL;
++*pos;
return hlist_entry_safe(cd->hash_table[hash].first,
return hlist_entry_safe(rcu_dereference_raw(
hlist_first_rcu(&cd->hash_table[hash])),
struct cache_head, cache_list);
}
EXPORT_SYMBOL_GPL(cache_seq_next);

void cache_seq_stop(struct seq_file *m, void *p)
__releases(cd->hash_lock)
void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
__acquires(RCU)
{
struct cache_detail *cd = m->private;
read_unlock(&cd->hash_lock);
rcu_read_lock();
return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_stop);
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
__releases(RCU)
{
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
@@ -1384,9 +1413,9 @@ static int c_show(struct seq_file *m, void *p)
}

static const struct seq_operations cache_content_op = {
.start = cache_seq_start,
.next = cache_seq_next,
.stop = cache_seq_stop,
.start = cache_seq_start_rcu,
.next = cache_seq_next_rcu,
.stop = cache_seq_stop_rcu,
.show = c_show,
};

@@ -1844,13 +1873,13 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);

void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
write_lock(&cd->hash_lock);
spin_lock(&cd->hash_lock);
if (!hlist_unhashed(&h->cache_list)){
hlist_del_init(&h->cache_list);
hlist_del_init_rcu(&h->cache_list);
cd->entries--;
write_unlock(&cd->hash_lock);
spin_unlock(&cd->hash_lock);
cache_put(h, cd);
} else
write_unlock(&cd->hash_lock);
spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
@@ -987,7 +987,7 @@ static void call_xpt_users(struct svc_xprt *xprt)
spin_lock(&xprt->xpt_lock);
while (!list_empty(&xprt->xpt_users)) {
u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
list_del(&u->list);
list_del_init(&u->list);
u->callback(u);
}
spin_unlock(&xprt->xpt_lock);
@@ -27,12 +27,32 @@
extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;

static DEFINE_SPINLOCK(authtab_lock);
static struct auth_ops *authtab[RPC_AUTH_MAXFLAVOR] = {
[0] = &svcauth_null,
[1] = &svcauth_unix,
static struct auth_ops __rcu *authtab[RPC_AUTH_MAXFLAVOR] = {
[RPC_AUTH_NULL] = (struct auth_ops __force __rcu *)&svcauth_null,
[RPC_AUTH_UNIX] = (struct auth_ops __force __rcu *)&svcauth_unix,
};

static struct auth_ops *
svc_get_auth_ops(rpc_authflavor_t flavor)
{
struct auth_ops *aops;

if (flavor >= RPC_AUTH_MAXFLAVOR)
return NULL;
rcu_read_lock();
aops = rcu_dereference(authtab[flavor]);
if (aops != NULL && !try_module_get(aops->owner))
aops = NULL;
rcu_read_unlock();
return aops;
}

static void
svc_put_auth_ops(struct auth_ops *aops)
{
module_put(aops->owner);
}

int
svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
{
@@ -45,14 +65,11 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)

dprintk("svc: svc_authenticate (%d)\n", flavor);

spin_lock(&authtab_lock);
if (flavor >= RPC_AUTH_MAXFLAVOR || !(aops = authtab[flavor]) ||
!try_module_get(aops->owner)) {
spin_unlock(&authtab_lock);
aops = svc_get_auth_ops(flavor);
if (aops == NULL) {
*authp = rpc_autherr_badcred;
return SVC_DENIED;
}
spin_unlock(&authtab_lock);

rqstp->rq_auth_slack = 0;
init_svc_cred(&rqstp->rq_cred);
@@ -82,7 +99,7 @@ int svc_authorise(struct svc_rqst *rqstp)

if (aops) {
rv = aops->release(rqstp);
module_put(aops->owner);
svc_put_auth_ops(aops);
}
return rv;
}
@@ -90,13 +107,14 @@ int svc_authorise(struct svc_rqst *rqstp)
int
svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops)
{
struct auth_ops *old;
int rv = -EINVAL;
spin_lock(&authtab_lock);
if (flavor < RPC_AUTH_MAXFLAVOR && authtab[flavor] == NULL) {
authtab[flavor] = aops;
rv = 0;

if (flavor < RPC_AUTH_MAXFLAVOR) {
old = cmpxchg((struct auth_ops ** __force)&authtab[flavor], NULL, aops);
if (old == NULL || old == aops)
rv = 0;
}
spin_unlock(&authtab_lock);
return rv;
}
EXPORT_SYMBOL_GPL(svc_auth_register);
@@ -104,10 +122,8 @@ EXPORT_SYMBOL_GPL(svc_auth_register);
void
svc_auth_unregister(rpc_authflavor_t flavor)
{
spin_lock(&authtab_lock);
if (flavor < RPC_AUTH_MAXFLAVOR)
authtab[flavor] = NULL;
spin_unlock(&authtab_lock);
rcu_assign_pointer(authtab[flavor], NULL);
}
EXPORT_SYMBOL_GPL(svc_auth_unregister);

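/*
 * With authtab[] RCU-protected, registration becomes an atomic claim of the
 * slot: cmpxchg() installs aops only if the slot is still NULL, so two
 * racing registrations cannot both succeed. Callers are unchanged; e.g.
 * rpcsec_gss (whose svcauthops_gss appears earlier in this diff) still
 * registers itself in essence like this (function name invented for
 * illustration):
 */
static int __init register_gss_flavor(void)
{
	return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
}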
@@ -127,10 +143,11 @@ static struct hlist_head auth_domain_table[DN_HASHMAX];
static DEFINE_SPINLOCK(auth_domain_lock);

static void auth_domain_release(struct kref *kref)
__releases(&auth_domain_lock)
{
struct auth_domain *dom = container_of(kref, struct auth_domain, ref);

hlist_del(&dom->hash);
hlist_del_rcu(&dom->hash);
dom->flavour->domain_release(dom);
spin_unlock(&auth_domain_lock);
}
@@ -159,7 +176,7 @@ auth_domain_lookup(char *name, struct auth_domain *new)
}
}
if (new)
hlist_add_head(&new->hash, head);
hlist_add_head_rcu(&new->hash, head);
spin_unlock(&auth_domain_lock);
return new;
}
@@ -167,6 +184,21 @@ EXPORT_SYMBOL_GPL(auth_domain_lookup);

struct auth_domain *auth_domain_find(char *name)
{
return auth_domain_lookup(name, NULL);
struct auth_domain *hp;
struct hlist_head *head;

head = &auth_domain_table[hash_str(name, DN_HASHBITS)];

rcu_read_lock();
hlist_for_each_entry_rcu(hp, head, hash) {
if (strcmp(hp->name, name)==0) {
if (!kref_get_unless_zero(&hp->ref))
hp = NULL;
rcu_read_unlock();
return hp;
}
}
rcu_read_unlock();
return NULL;
}
EXPORT_SYMBOL_GPL(auth_domain_find);
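/*
 * Why kref_get_unless_zero() in the rewritten auth_domain_find(): with RCU
 * lookup an object can remain reachable on the hash chain for a grace
 * period after its final put, so a plain kref_get() could resurrect it and
 * lead to a double free; failing the get makes the lookup simply miss.
 * Generic shape of the read side (struct obj and names are illustrative):
 */
struct obj {
	struct hlist_node hash;
	struct kref ref;
	const char *name;
};

static struct obj *obj_find_rcu(struct hlist_head *head, const char *name)
{
	struct obj *o;

	rcu_read_lock();
	hlist_for_each_entry_rcu(o, head, hash) {
		if (strcmp(o->name, name) != 0)
			continue;
		if (!kref_get_unless_zero(&o->ref))
			o = NULL;	/* raced with the final put */
		rcu_read_unlock();
		return o;
	}
	rcu_read_unlock();
	return NULL;
}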
@@ -37,20 +37,26 @@ struct unix_domain {
extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;

static void svcauth_unix_domain_release(struct auth_domain *dom)
static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
{
struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
struct unix_domain *ud = container_of(dom, struct unix_domain, h);

kfree(dom->name);
kfree(ud);
}

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
}

struct auth_domain *unix_domain_find(char *name)
{
struct auth_domain *rv;
struct unix_domain *new = NULL;

rv = auth_domain_lookup(name, NULL);
rv = auth_domain_find(name);
while(1) {
if (rv) {
if (new && rv != &new->h)
@@ -91,6 +97,7 @@ struct ip_map {
char m_class[8]; /* e.g. "nfsd" */
struct in6_addr m_addr;
struct unix_domain *m_client;
struct rcu_head m_rcu;
};

static void ip_map_put(struct kref *kref)
@@ -101,7 +108,7 @@ static void ip_map_put(struct kref *kref)
if (test_bit(CACHE_VALID, &item->flags) &&
!test_bit(CACHE_NEGATIVE, &item->flags))
auth_domain_put(&im->m_client->h);
kfree(im);
kfree_rcu(im, m_rcu);
}

static inline int hash_ip6(const struct in6_addr *ip)
@@ -280,9 +287,9 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,

strcpy(ip.m_class, class);
ip.m_addr = *addr;
ch = sunrpc_cache_lookup(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
hash_ip6(addr));
ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
hash_ip6(addr));

if (ch)
return container_of(ch, struct ip_map, h);
@@ -412,6 +419,7 @@ struct unix_gid {
struct cache_head h;
kuid_t uid;
struct group_info *gi;
struct rcu_head rcu;
};

static int unix_gid_hash(kuid_t uid)
@@ -426,7 +434,7 @@ static void unix_gid_put(struct kref *kref)
if (test_bit(CACHE_VALID, &item->flags) &&
!test_bit(CACHE_NEGATIVE, &item->flags))
put_group_info(ug->gi);
kfree(ug);
kfree_rcu(ug, rcu);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
@@ -619,7 +627,7 @@ static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
struct cache_head *ch;

ug.uid = uid;
ch = sunrpc_cache_lookup(cd, &ug.h, unix_gid_hash(uid));
ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
if (ch)
return container_of(ch, struct unix_gid, h);
else
@@ -325,59 +325,34 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
/*
* Generic recvfrom routine.
*/
static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr,
int buflen)
static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
unsigned int nr, size_t buflen, unsigned int base)
{
struct svc_sock *svsk =
container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
struct msghdr msg = {
.msg_flags = MSG_DONTWAIT,
};
int len;
struct msghdr msg = { NULL };
ssize_t len;

rqstp->rq_xprt_hlen = 0;

clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nr, buflen);
len = sock_recvmsg(svsk->sk_sock, &msg, msg.msg_flags);
if (base != 0) {
iov_iter_advance(&msg.msg_iter, base);
buflen -= base;
}
len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
/* If we read a full record, then assume there may be more
* data to read (stream based sockets only!)
*/
if (len == buflen)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

dprintk("svc: socket %p recvfrom(%p, %zu) = %d\n",
dprintk("svc: socket %p recvfrom(%p, %zu) = %zd\n",
svsk, iov[0].iov_base, iov[0].iov_len, len);
return len;
}

static int svc_partial_recvfrom(struct svc_rqst *rqstp,
struct kvec *iov, int nr,
int buflen, unsigned int base)
{
size_t save_iovlen;
void *save_iovbase;
unsigned int i;
int ret;

if (base == 0)
return svc_recvfrom(rqstp, iov, nr, buflen);

for (i = 0; i < nr; i++) {
if (iov[i].iov_len > base)
break;
base -= iov[i].iov_len;
}
save_iovlen = iov[i].iov_len;
save_iovbase = iov[i].iov_base;
iov[i].iov_len -= base;
iov[i].iov_base += base;
ret = svc_recvfrom(rqstp, &iov[i], nr - i, buflen);
iov[i].iov_len = save_iovlen;
iov[i].iov_base = save_iovbase;
return ret;
}

/*
* Set socket snd and rcv buffer lengths
*/
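/*
 * What replaced svc_partial_recvfrom(): instead of hand-editing
 * iov_base/iov_len to skip "base" already-received bytes and restoring them
 * afterwards, the iov_iter is simply advanced before the receive. Core of
 * the idea, reduced to a standalone sketch (parameters as in the new
 * svc_recvfrom() above; recv_skipping_base is an invented name):
 */
static ssize_t recv_skipping_base(struct socket *sock, struct kvec *iov,
				  unsigned int nr, size_t buflen,
				  unsigned int base)
{
	struct msghdr msg = { NULL };

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nr, buflen);
	if (base != 0) {
		iov_iter_advance(&msg.msg_iter, base);	/* skip resumed bytes */
		buflen -= base;
	}
	return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
}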
@@ -962,7 +937,8 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
iov.iov_len = want;
if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
len = svc_recvfrom(rqstp, &iov, 1, want, 0);
if (len < 0)
goto error;
svsk->sk_tcplen += len;

@@ -1088,14 +1064,13 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)

vec = rqstp->rq_vec;

pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
svsk->sk_datalen + want);
pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], base + want);

rqstp->rq_respages = &rqstp->rq_pages[pnum];
rqstp->rq_next_page = rqstp->rq_respages + 1;

/* Now receive data */
len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
len = svc_recvfrom(rqstp, vec, pnum, base + want, base);
if (len >= 0) {
svsk->sk_tcplen += len;
svsk->sk_datalen += len;
@@ -5,8 +5,6 @@
* Support for backward direction RPCs on RPC/RDMA (server-side).
*/

#include <linux/module.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
@@ -32,7 +30,6 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct kvec *dst, *src = &rcvbuf->head[0];
struct rpc_rqst *req;
unsigned long cwnd;
u32 credits;
size_t len;
__be32 xid;
@@ -66,6 +63,8 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
if (dst->iov_len < len)
goto out_unlock;
memcpy(dst->iov_base, p, len);
xprt_pin_rqst(req);
spin_unlock(&xprt->queue_lock);

credits = be32_to_cpup(rdma_resp + 2);
if (credits == 0)
@@ -74,15 +73,13 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
credits = r_xprt->rx_buf.rb_bc_max_requests;

spin_lock_bh(&xprt->transport_lock);
cwnd = xprt->cwnd;
xprt->cwnd = credits << RPC_CWNDSHIFT;
if (xprt->cwnd > cwnd)
xprt_release_rqst_cong(req->rq_task);
spin_unlock_bh(&xprt->transport_lock);


spin_lock(&xprt->queue_lock);
ret = 0;
xprt_complete_rqst(req->rq_task, rcvbuf->len);
xprt_unpin_rqst(req);
rcvbuf->len = 0;

out_unlock:
@@ -251,7 +248,6 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);

xprt_free(xprt);
module_put(THIS_MODULE);
}

static const struct rpc_xprt_ops xprt_rdma_bc_procs = {
@@ -323,20 +319,9 @@ xprt_setup_rdma_bc(struct xprt_create *args)
args->bc_xprt->xpt_bc_xprt = xprt;
xprt->bc_xprt = args->bc_xprt;

if (!try_module_get(THIS_MODULE))
goto out_fail;

/* Final put for backchannel xprt is in __svc_rdma_free */
xprt_get(xprt);
return xprt;

out_fail:
xprt_rdma_free_addresses(xprt);
args->bc_xprt->xpt_bc_xprt = NULL;
args->bc_xprt->xpt_bc_xps = NULL;
xprt_put(xprt);
xprt_free(xprt);
return ERR_PTR(-EINVAL);
}

struct xprt_class xprt_rdma_bc = {
@@ -475,10 +475,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)

/* Qualify the transport resource defaults with the
* capabilities of this particular device */
newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
/* transport hdr, head iovec, one page list entry, tail iovec */
if (newxprt->sc_max_send_sges < 4) {
pr_err("svcrdma: too few Send SGEs available (%d)\n",
/* Transport header, head iovec, tail iovec */
newxprt->sc_max_send_sges = 3;
/* Add one SGE per page list entry */
newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
newxprt->sc_max_send_sges);
goto errout;
}
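/*
 * Worked example of the new Send SGE sizing (numbers illustrative, not from
 * the patch): one SGE for the transport header, one for the head iovec, one
 * for the tail iovec, plus one per page of the maximum request. With a 1 MB
 * maximum request size and 4 KB pages:
 *
 *	needed = 3 + 1048576 / 4096 = 259 SGEs
 *
 * A device offering fewer now fails the accept up front instead of being
 * silently capped at the device limit.
 */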
@@ -6,7 +6,7 @@

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/xfrm.h>