page cache: use xa_lock
Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root.  Rename the address_space ->page_tree to ->i_pages,
since we don't really care that it's a tree.

[willy@infradead.org: fix nds32, fs/dax.c]
  Link: http://lkml.kernel.org/r/20180406145415.GB20605@bombadil.infradead.org
Link: http://lkml.kernel.org/r/20180313132639.17387-9-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

committed by Linus Torvalds
parent f6bb2a2c0b
commit b93b016313
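To illustrate the conversion described in the commit message, here is a minimal before/after sketch of the locking pattern (the helper names and arguments are illustrative only, not taken from the patch): callers that previously took the address_space spinlock ->tree_lock around radix tree operations on ->page_tree now take the xa_lock embedded in the radix_tree_root, reached through the renamed ->i_pages member. The two helpers obviously cannot coexist in one tree; they only contrast the pre- and post-patch forms.

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/radix-tree.h>

    /* Before this commit: the address_space carried its own spinlock
     * (->tree_lock) guarding the page cache radix tree (->page_tree).
     */
    static void sketch_delete_old(struct address_space *mapping, pgoff_t index)
    {
    	spin_lock_irq(&mapping->tree_lock);
    	radix_tree_delete(&mapping->page_tree, index);
    	spin_unlock_irq(&mapping->tree_lock);
    }

    /* After this commit: the lock lives in the radix_tree_root itself and is
     * taken with xa_lock_irq(); the tree member is renamed to ->i_pages.
     * The underlying lock is still an irq-disabling spinlock, so the locking
     * rules for callers are unchanged.
     */
    static void sketch_delete_new(struct address_space *mapping, pgoff_t index)
    {
    	xa_lock_irq(&mapping->i_pages);
    	radix_tree_delete(&mapping->i_pages, index);
    	xa_unlock_irq(&mapping->i_pages);
    }

The nilfs2 hunks below are a straightforward application of this pattern.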
fs/nilfs2/page.c

@@ -331,15 +331,15 @@ repeat:
 		struct page *page2;
 
 		/* move the page to the destination cache */
-		spin_lock_irq(&smap->tree_lock);
-		page2 = radix_tree_delete(&smap->page_tree, offset);
+		xa_lock_irq(&smap->i_pages);
+		page2 = radix_tree_delete(&smap->i_pages, offset);
 		WARN_ON(page2 != page);
 
 		smap->nrpages--;
-		spin_unlock_irq(&smap->tree_lock);
+		xa_unlock_irq(&smap->i_pages);
 
-		spin_lock_irq(&dmap->tree_lock);
-		err = radix_tree_insert(&dmap->page_tree, offset, page);
+		xa_lock_irq(&dmap->i_pages);
+		err = radix_tree_insert(&dmap->i_pages, offset, page);
 		if (unlikely(err < 0)) {
 			WARN_ON(err == -EEXIST);
 			page->mapping = NULL;
@@ -348,11 +348,11 @@ repeat:
 			page->mapping = dmap;
 			dmap->nrpages++;
 			if (PageDirty(page))
-				radix_tree_tag_set(&dmap->page_tree,
+				radix_tree_tag_set(&dmap->i_pages,
 						   offset,
 						   PAGECACHE_TAG_DIRTY);
 		}
-		spin_unlock_irq(&dmap->tree_lock);
+		xa_unlock_irq(&dmap->i_pages);
 	}
 	unlock_page(page);
 }
@@ -474,15 +474,15 @@ int __nilfs_clear_page_dirty(struct page *page)
 	struct address_space *mapping = page->mapping;
 
 	if (mapping) {
-		spin_lock_irq(&mapping->tree_lock);
+		xa_lock_irq(&mapping->i_pages);
 		if (test_bit(PG_dirty, &page->flags)) {
-			radix_tree_tag_clear(&mapping->page_tree,
+			radix_tree_tag_clear(&mapping->i_pages,
 					     page_index(page),
 					     PAGECACHE_TAG_DIRTY);
-			spin_unlock_irq(&mapping->tree_lock);
+			xa_unlock_irq(&mapping->i_pages);
 			return clear_page_dirty_for_io(page);
 		}
-		spin_unlock_irq(&mapping->tree_lock);
+		xa_unlock_irq(&mapping->i_pages);
 		return 0;
 	}
 	return TestClearPageDirty(page);