mm: remove zone_lru_lock() function, access ->lru_lock directly
We have a common pattern to access lru_lock from a page pointer:

	zone_lru_lock(page_zone(page))

Which is silly, because it unfolds to this:

	&NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]->zone_pgdat->lru_lock

while we can simply do:

	&NODE_DATA(page_to_nid(page))->lru_lock

Remove the zone_lru_lock() function, since it only complicates things. Use the 'page_pgdat(page)->lru_lock' pattern instead.

[aryabinin@virtuozzo.com: a slightly better version of __split_huge_page()]
Link: http://lkml.kernel.org/r/20190301121651.7741-1-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/20190228083329.31892-2-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
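To make the saving concrete, here is a minimal sketch of the two access paths. It assumes the pre-patch helper in include/linux/mmzone.h looked roughly like the one removed by this series; the snippet is illustrative, not quoted from the patch.

	/* Removed helper: reaches the node's lock by bouncing through the zone (sketch). */
	static inline spinlock_t *zone_lru_lock(struct zone *zone)
	{
		return &zone->zone_pgdat->lru_lock;
	}

	/* Old call-site pattern: */
	spin_lock_irq(zone_lru_lock(page_zone(page)));

	/* New call-site pattern: page_pgdat() goes straight to the node. */
	spin_lock_irq(&page_pgdat(page)->lru_lock);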
commit f4b7e272b5
parent a7ca12f9d9
committed by Linus Torvalds
 mm/vmscan.c | 16 ++++++++--------
@@ -1614,8 +1614,8 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 
 }
 
-/*
- * zone_lru_lock is heavily contended. Some of the functions that
+/**
+ * pgdat->lru_lock is heavily contended. Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
  * and working on them outside the LRU lock.
  *
@@ -1750,11 +1750,11 @@ int isolate_lru_page(struct page *page)
 	WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
 
 	if (PageLRU(page)) {
-		struct zone *zone = page_zone(page);
+		pg_data_t *pgdat = page_pgdat(page);
 		struct lruvec *lruvec;
 
-		spin_lock_irq(zone_lru_lock(zone));
-		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+		spin_lock_irq(&pgdat->lru_lock);
+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 		if (PageLRU(page)) {
 			int lru = page_lru(page);
 			get_page(page);
@@ -1762,7 +1762,7 @@ int isolate_lru_page(struct page *page)
 			del_page_from_lru_list(page, lruvec, lru);
 			ret = 0;
 		}
-		spin_unlock_irq(zone_lru_lock(zone));
+		spin_unlock_irq(&pgdat->lru_lock);
 	}
 	return ret;
 }
@@ -1990,9 +1990,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  * processes, from rmap.
  *
  * If the pages are mostly unmapped, the processing is fast and it is
- * appropriate to hold zone_lru_lock across the whole operation. But if
+ * appropriate to hold pgdat->lru_lock across the whole operation. But if
  * the pages are mapped, the processing is slow (page_referenced()) so we
- * should drop zone_lru_lock around each page. It's impossible to balance
+ * should drop pgdat->lru_lock around each page. It's impossible to balance
  * this, so instead we remove the pages from the LRU while processing them.
  * It is safe to rely on PG_active against the non-LRU pages in here because
  * nobody will play with that bit on a non-LRU page.
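The isolate_lru_page() hunks above show why the conversion is purely mechanical: for any page, the zone's backing node is the page's own node, so both spellings name the same spinlock. A minimal sketch of that invariant follows; the VM_BUG_ON_PAGE() check is illustrative only and is not part of the patch.

	/* page_zone(page)->zone_pgdat and page_pgdat(page) resolve to the same
	 * pg_data_t, hence to the same lru_lock. */
	VM_BUG_ON_PAGE(page_zone(page)->zone_pgdat != page_pgdat(page), page);

	spin_lock_irq(&page_pgdat(page)->lru_lock);	/* same lock zone_lru_lock(page_zone(page)) used to take */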