page cache: use xa_lock
Remove the address_space ->tree_lock and use the xa_lock newly added to
the radix_tree_root.  Rename the address_space ->page_tree to ->i_pages,
since we don't really care that it's a tree.

[willy@infradead.org: fix nds32, fs/dax.c]
  Link: http://lkml.kernel.org/r/20180406145415.GB20605@bombadil.infradead.org
Link: http://lkml.kernel.org/r/20180313132639.17387-9-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b93b016313
parent f6bb2a2c0b
committed by Linus Torvalds
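
For orientation, a minimal sketch of the conversion pattern the series applies at call sites. This snippet is not taken from the patch itself: mapping, index and page are placeholder variables, and the xa_lock*() macros are the wrappers (added earlier in the series) around the spinlock now embedded in the radix_tree_root:

	/* Before: the page tree and its lock were two separate
	 * address_space fields. */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_insert(&mapping->page_tree, index, page);
	spin_unlock_irq(&mapping->tree_lock);

	/* After: the lock lives in the root itself.  xa_lock_irq() is
	 * simply spin_lock_irq() on that embedded lock, so the locking
	 * semantics are unchanged; only the name and location move. */
	xa_lock_irq(&mapping->i_pages);
	radix_tree_insert(&mapping->i_pages, index, page);
	xa_unlock_irq(&mapping->i_pages);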
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -202,7 +202,7 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
  * @mapping: address space the page was backing
  * @page: the page being evicted
  *
- * Returns a shadow entry to be stored in @mapping->page_tree in place
+ * Returns a shadow entry to be stored in @mapping->i_pages in place
  * of the evicted @page so that a later refault can be detected.
  */
 void *workingset_eviction(struct address_space *mapping, struct page *page)
@@ -348,7 +348,7 @@ void workingset_update_node(struct radix_tree_node *node)
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
-	 * as node->private_list is protected by &mapping->tree_lock.
+	 * as node->private_list is protected by the i_pages lock.
	 */
	if (node->count && node->count == node->exceptional) {
		if (list_empty(&node->private_list))
@@ -366,7 +366,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
	unsigned long nodes;
	unsigned long cache;

-	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	local_irq_disable();
	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	local_irq_enable();
@@ -419,21 +419,21 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,

	/*
	 * Page cache insertions and deletions synchroneously maintain
-	 * the shadow node LRU under the mapping->tree_lock and the
+	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
-	 * We can then safely transition to the mapping->tree_lock to
+	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
-	mapping = container_of(node->root, struct address_space, page_tree);
+	mapping = container_of(node->root, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
-	if (!spin_trylock(&mapping->tree_lock)) {
+	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
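
The trylock in the hunk above is the one place where the documented order (i_pages lock first, then lru_lock) is inverted, since reclaim arrives already holding lru_lock. A self-contained userspace analogue of the idiom, with hypothetical pthread mutexes standing in for the two kernel locks:

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t ipages_lock = PTHREAD_MUTEX_INITIALIZER; /* normally taken first */
	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;    /* normally taken second */

	/* Called with lru_lock held.  Only *try* the outer lock; on
	 * contention, drop what we hold and report failure so the caller
	 * retries the whole walk instead of deadlocking ABBA against the
	 * normal ordering. */
	static bool lock_ipages_from_lru(void)
	{
		if (pthread_mutex_trylock(&ipages_lock) != 0) {
			pthread_mutex_unlock(&lru_lock);
			return false;   /* the kernel's LRU_RETRY */
		}
		return true;
	}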
@@ -468,11 +468,11 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
	if (WARN_ON_ONCE(node->exceptional))
		goto out_invalid;
	inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
-	__radix_tree_delete_node(&mapping->page_tree, node,
+	__radix_tree_delete_node(&mapping->i_pages, node,
				 workingset_lookup_update(mapping));

 out_invalid:
-	spin_unlock(&mapping->tree_lock);
+	xa_unlock(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
 out:
	local_irq_enable();
@@ -487,7 +487,7 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
 {
	unsigned long ret;

-	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	local_irq_disable();
	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
	local_irq_enable();
@@ -503,7 +503,7 @@ static struct shrinker workingset_shadow_shrinker = {

 /*
  * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
- * mapping->tree_lock.
+ * i_pages lock.
  */
 static struct lock_class_key shadow_nodes_key;

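The dedicated lock_class_key in this last hunk follows from the nesting comment: most list_lru locks are never taken with IRQs disabled, so this list_lru needs its own lockdep class to keep its IRQ-safety requirement tracked separately. A hedged sketch of how the key is plumbed in at init time, from memory of this era's workingset_init() (simplified; the real function also sizes and registers the shrinker and handles errors):

	static struct list_lru shadow_nodes;
	static struct lock_class_key shadow_nodes_key;

	static int __init workingset_init(void)
	{
		/* memcg-aware list_lru, with our own lockdep key so its
		 * IRQ-safe internal lock is classed apart from other,
		 * IRQ-unsafe list_lru users. */
		return __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
	}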