ceph: move sb->wb_pagevec_pool to be a global mempool
When doing some testing recently, I hit some page allocation failures on mount, when creating the wb_pagevec_pool for the mount. That requires 128k (32 contiguous pages), and after thrashing the memory during an xfstests run, sometimes that would fail.

128k for each mount seems like a lot to hold in reserve for a rainy day, so let's change this to a global mempool that gets allocated when the module is plugged in.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
committed by Ilya Dryomov

parent b748fc7a87
commit a0102bda5b
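The hunks below cover only the writeback paths in addr.c that consume the pool; the declaration of the global ceph_wb_pagevec_pool and its creation at module init live in files not shown in this excerpt. As a minimal sketch of what a module-wide pool of this kind looks like, assuming a kmalloc-backed mempool sized for the largest possible write (the element count, constant names, and function names here are illustrative assumptions, not the commit's exact super.{c,h} hunks):

/* Sketch only: a module-global mempool for writeback page-pointer arrays. */
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/mm.h>

mempool_t *ceph_wb_pagevec_pool;	/* shared by every ceph mount */

static int __init ceph_wb_pool_init(void)	/* illustrative name */
{
	/*
	 * Each pool element is an array of page pointers big enough for the
	 * largest possible write, so the 128k reservation happens once at
	 * module load instead of once per mount.
	 */
	ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10 /* assumed min_nr */,
			(CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *));
	return ceph_wb_pagevec_pool ? 0 : -ENOMEM;
}

static void __exit ceph_wb_pool_exit(void)	/* illustrative name */
{
	mempool_destroy(ceph_wb_pagevec_pool);	/* NULL-safe */
}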
@@ -862,8 +862,7 @@ static void writepages_finish(struct ceph_osd_request *req)
 
 	osd_data = osd_req_op_extent_osd_data(req, 0);
 	if (osd_data->pages_from_pool)
-		mempool_free(osd_data->pages,
-			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
+		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
 	else
 		kfree(osd_data->pages);
 	ceph_osdc_put_request(req);
@@ -955,10 +954,10 @@ retry:
 		int num_ops = 0, op_idx;
 		unsigned i, pvec_pages, max_pages, locked_pages = 0;
 		struct page **pages = NULL, **data_pages;
-		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
 		struct page *page;
 		pgoff_t strip_unit_end = 0;
 		u64 offset = 0, len = 0;
+		bool from_pool = false;
 
 		max_pages = wsize >> PAGE_SHIFT;
 
@@ -1057,16 +1056,16 @@ get_more_pages:
 						      sizeof(*pages),
 						      GFP_NOFS);
 				if (!pages) {
-					pool = fsc->wb_pagevec_pool;
-					pages = mempool_alloc(pool, GFP_NOFS);
+					from_pool = true;
+					pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
 					BUG_ON(!pages);
 				}
 
 				len = 0;
 			} else if (page->index !=
 				   (offset + len) >> PAGE_SHIFT) {
-				if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS :
-						       CEPH_OSD_MAX_OPS)) {
+				if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
+							    CEPH_OSD_MAX_OPS)) {
 					redirty_page_for_writepage(wbc, page);
 					unlock_page(page);
 					break;
@@ -1161,7 +1160,7 @@ new_request:
 				     offset, len);
 				osd_req_op_extent_osd_data_pages(req, op_idx,
 							data_pages, len, 0,
-							!!pool, false);
+							from_pool, false);
 				osd_req_op_extent_update(req, op_idx, len);
 
 				len = 0;
@@ -1188,12 +1187,12 @@ new_request:
 		dout("writepages got pages at %llu~%llu\n", offset, len);
 
 		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
-						 0, !!pool, false);
+						 0, from_pool, false);
 		osd_req_op_extent_update(req, op_idx, len);
 
 		BUG_ON(op_idx + 1 != req->r_num_ops);
 
-		pool = NULL;
+		from_pool = false;
 		if (i < locked_pages) {
 			BUG_ON(num_ops <= req->r_num_ops);
 			num_ops -= req->r_num_ops;
@@ -1204,8 +1203,8 @@ new_request:
 			pages = kmalloc_array(locked_pages, sizeof(*pages),
 					      GFP_NOFS);
 			if (!pages) {
-				pool = fsc->wb_pagevec_pool;
-				pages = mempool_alloc(pool, GFP_NOFS);
+				from_pool = true;
+				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
 				BUG_ON(!pages);
 			}
 			memcpy(pages, data_pages + i,
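Taken together, the hunks swap a per-superblock pointer for the single global pool but keep the same allocation strategy: try a normal kmalloc_array() first and fall back to the reserved mempool only when that fails, recording the origin in a bool so the completion path frees through the matching allocator. A condensed restatement of that pattern, under the same assumption of a global ceph_wb_pagevec_pool (the helper names below are hypothetical, not functions from the commit):

/* Condensed sketch of the fallback pattern used by the hunks above. */
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/slab.h>

extern mempool_t *ceph_wb_pagevec_pool;	/* the global pool from this commit */

static struct page **alloc_pagevec(unsigned int nr, bool *from_pool)	/* hypothetical helper */
{
	struct page **pages;

	*from_pool = false;
	pages = kmalloc_array(nr, sizeof(*pages), GFP_NOFS);
	if (!pages) {
		/*
		 * Under memory pressure, dip into the reserve. Pool elements
		 * are sized for the largest write, and mempool_alloc() with a
		 * sleeping gfp mask waits rather than failing, which is why
		 * the real code follows this with BUG_ON(!pages).
		 */
		pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
		*from_pool = true;
	}
	return pages;
}

static void free_pagevec(struct page **pages, bool from_pool)	/* hypothetical helper */
{
	if (from_pool)
		mempool_free(pages, ceph_wb_pagevec_pool);
	else
		kfree(pages);
}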