mm: make per-memcg LRU lists exclusive
Now that all code that operated on the global per-zone LRU lists has been
converted to operate on per-memory cgroup LRU lists instead, there is no
reason to keep the double-LRU scheme around any longer.

The pc->lru member is removed and page->lru is linked directly to the
per-memory cgroup LRU lists, which removes two pointers from a descriptor
that exists for every page frame in the system.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
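For orientation, a minimal sketch of the data structures this change converges
on, assuming simplified field layouts (the enum values mirror the lru_list
names used in the hunks below; the real kernel definitions carry extra fields
and derived constants):

#include <linux/list.h>

/* Illustrative sketch only -- simplified, not the literal kernel
 * definitions from this series. */
enum lru_list {
	LRU_INACTIVE_ANON,
	LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE,
	LRU_ACTIVE_FILE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

/* One set of LRU lists per zone and, with this series, one per
 * memcg-zone pair.  After this patch a page is linked through its
 * own page->lru into exactly one lruvec at a time -- previously the
 * page_cgroup's pc->lru provided a second, parallel linkage. */
struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
};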
 mm/swap.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -232,12 +232,14 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 static void pagevec_move_tail_fn(struct page *page, void *arg)
 {
 	int *pgmoved = arg;
-	struct zone *zone = page_zone(page);
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 		enum lru_list lru = page_lru_base_type(page);
-		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
-		mem_cgroup_rotate_reclaimable_page(page);
+		struct lruvec *lruvec;
+
+		lruvec = mem_cgroup_lru_move_lists(page_zone(page),
+						   page, lru, lru);
+		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		(*pgmoved)++;
 	}
 }
@@ -476,12 +478,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
 		 */
 		SetPageReclaim(page);
 	} else {
+		struct lruvec *lruvec;
 		/*
 		 * The page's writeback ends up during pagevec
 		 * We moves tha page into tail of inactive.
 		 */
-		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
-		mem_cgroup_rotate_reclaimable_page(page);
+		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
+		list_move_tail(&page->lru, &lruvec->lists[lru]);
 		__count_vm_event(PGROTATED);
 	}
 
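Both hunks above make the same substitution: the old code spliced page->lru
within zone->lruvec and then separately notified the memcg bookkeeping via
mem_cgroup_rotate_reclaimable_page(); the new code first asks the memcg layer
which lruvec the page belongs to and then performs a single list operation on
it. The pattern in isolation, as a hypothetical helper (rotate_page_to_tail()
is invented for illustration; the identifiers it calls are the ones from the
diff):

/* Hypothetical helper, not part of the patch: rotate one page to
 * the tail of its LRU list using the new exclusive scheme. */
static void rotate_page_to_tail(struct zone *zone, struct page *page)
{
	enum lru_list lru = page_lru_base_type(page);
	struct lruvec *lruvec;

	/* from == to: the page stays on the same list type and only
	 * changes position within its memcg's lruvec */
	lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
	list_move_tail(&page->lru, &lruvec->lists[lru]);
}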
@@ -663,6 +666,8 @@ void lru_add_page_tail(struct zone* zone,
 	SetPageLRU(page_tail);
 
 	if (page_evictable(page_tail, NULL)) {
+		struct lruvec *lruvec;
+
 		if (PageActive(page)) {
 			SetPageActive(page_tail);
 			active = 1;
@@ -672,11 +677,13 @@ void lru_add_page_tail(struct zone* zone,
 			lru = LRU_INACTIVE_ANON;
 		}
 		update_page_reclaim_stat(zone, page_tail, file, active);
+		lruvec = mem_cgroup_lru_add_list(zone, page_tail, lru);
 		if (likely(PageLRU(page)))
-			__add_page_to_lru_list(zone, page_tail, lru,
-					       page->lru.prev);
+			list_add(&page_tail->lru, page->lru.prev);
 		else
-			add_page_to_lru_list(zone, page_tail, lru);
+			list_add(&page_tail->lru, &lruvec->lists[lru]);
+		__mod_zone_page_state(zone, NR_LRU_BASE + lru,
+				      hpage_nr_pages(page_tail));
 	} else {
 		SetPageUnevictable(page_tail);
 		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
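The mm/memcontrol.c half of the change is not shown in this view. Judging only
from the call sites above, a plausible shape for the lookup helper, assuming
it is composed from per-list del/add primitives (mem_cgroup_lru_del_list is an
assumption here; only mem_cgroup_lru_add_list actually appears in the hunks):

/* Sketch of a possible implementation, inferred from the callers
 * above -- not the verbatim kernel code. */
struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
					 struct page *page,
					 enum lru_list from,
					 enum lru_list to)
{
	/* unaccount the page from the old list, then account it to
	 * the new one and return the lruvec it now lives on */
	mem_cgroup_lru_del_list(page, from);
	return mem_cgroup_lru_add_list(zone, page, to);
}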