nilfs2: Convert to XArray
This is close to a 1:1 replacement of radix tree APIs with their XArray equivalents. It would be possible to optimise nilfs_copy_back_pages(), but that doesn't seem to be in the performance path. Also, I think it has a pre-existing bug, and I've added a note to that effect in the source code. Signed-off-by: Matthew Wilcox <willy@infradead.org>
This commit is contained in:
@@ -289,7 +289,7 @@ repeat:
  * @dmap: destination page cache
  * @smap: source page cache
  *
- * No pages must no be added to the cache during this process.
+ * No pages must be added to the cache during this process.
  * This must be ensured by the caller.
  */
 void nilfs_copy_back_pages(struct address_space *dmap,
@@ -298,7 +298,6 @@ void nilfs_copy_back_pages(struct address_space *dmap,
 	struct pagevec pvec;
 	unsigned int i, n;
 	pgoff_t index = 0;
-	int err;
 
 	pagevec_init(&pvec);
 repeat:
@@ -313,35 +312,34 @@ repeat:
 		lock_page(page);
 		dpage = find_lock_page(dmap, offset);
 		if (dpage) {
-			/* override existing page on the destination cache */
+			/* overwrite existing page in the destination cache */
 			WARN_ON(PageDirty(dpage));
 			nilfs_copy_page(dpage, page, 0);
 			unlock_page(dpage);
 			put_page(dpage);
+			/* Do we not need to remove page from smap here? */
 		} else {
-			struct page *page2;
+			struct page *p;
 
 			/* move the page to the destination cache */
 			xa_lock_irq(&smap->i_pages);
-			page2 = radix_tree_delete(&smap->i_pages, offset);
-			WARN_ON(page2 != page);
+			p = __xa_erase(&smap->i_pages, offset);
+			WARN_ON(page != p);
 			smap->nrpages--;
 			xa_unlock_irq(&smap->i_pages);
 
 			xa_lock_irq(&dmap->i_pages);
-			err = radix_tree_insert(&dmap->i_pages, offset, page);
-			if (unlikely(err < 0)) {
-				WARN_ON(err == -EEXIST);
+			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
+			if (unlikely(p)) {
+				/* Probably -ENOMEM */
 				page->mapping = NULL;
 				put_page(page); /* for cache */
 			} else {
 				page->mapping = dmap;
 				dmap->nrpages++;
 				if (PageDirty(page))
-					radix_tree_tag_set(&dmap->i_pages,
-							   offset,
-							   PAGECACHE_TAG_DIRTY);
+					__xa_set_mark(&dmap->i_pages, offset,
+							PAGECACHE_TAG_DIRTY);
 			}
 			xa_unlock_irq(&dmap->i_pages);
 		}
@@ -467,8 +465,7 @@ int __nilfs_clear_page_dirty(struct page *page)
 	if (mapping) {
 		xa_lock_irq(&mapping->i_pages);
 		if (test_bit(PG_dirty, &page->flags)) {
-			radix_tree_tag_clear(&mapping->i_pages,
-					     page_index(page),
+			__xa_clear_mark(&mapping->i_pages, page_index(page),
					     PAGECACHE_TAG_DIRTY);
 			xa_unlock_irq(&mapping->i_pages);
 			return clear_page_dirty_for_io(page);
|
Reference in New Issue
Block a user