mm: move page mapped accounting to the node
Reclaim makes decisions based on the number of pages that are mapped,
but it's mixing node and zone information. Account NR_FILE_MAPPED and
NR_ANON_PAGES pages on the node.

Link: http://lkml.kernel.org/r/1467970510-21195-18-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
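The shape of the change is the same in every hunk of the diff below: a per-zone counter update keyed by page_zone() becomes a per-node update keyed by page_pgdat(), so node-wide reclaim reads one coherent number instead of summing per-zone shards. The following is a minimal userspace model of that pattern; the struct and function names mirror the kernel's, but the bodies are simplified stand-ins, not the real implementation:

/* model.c -- simplified stand-in for the node-based accounting, not kernel code */
#include <stdio.h>

enum node_stat_item { NR_FILE_MAPPED, NR_ANON_PAGES, NR_VM_NODE_STAT_ITEMS };

/* One of these per NUMA node; node counters live here, not in a zone. */
struct pglist_data {
	long vm_stat[NR_VM_NODE_STAT_ITEMS];	/* node-wide counters */
};

struct page {
	struct pglist_data *pgdat;		/* owning node */
};

/* Stand-in for page_pgdat(): resolve a page to its node. */
static struct pglist_data *page_pgdat(struct page *page)
{
	return page->pgdat;
}

/*
 * Stand-in for __mod_node_page_state(): the irq-unsafe variant is a
 * plain read-modify-write.  In the kernel that is safe here because
 * the pte lock (a spinlock) is held, which implies preemption is
 * disabled, and these counters are never modified in interrupt
 * context -- the same argument the comments in the diff make.
 */
static void __mod_node_page_state(struct pglist_data *pgdat,
				  enum node_stat_item item, long nr)
{
	pgdat->vm_stat[item] += nr;
}

int main(void)
{
	struct pglist_data node0 = { { 0 } };
	struct page page = { .pgdat = &node0 };

	/* rmap maps a file page: charge the node, not the zone */
	__mod_node_page_state(page_pgdat(&page), NR_FILE_MAPPED, 1);

	/* ...and unmaps it again */
	__mod_node_page_state(page_pgdat(&page), NR_FILE_MAPPED, -1);

	printf("node NR_FILE_MAPPED = %ld\n", node0.vm_stat[NR_FILE_MAPPED]);
	return 0;
}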
commit 50658e2e04
parent 281e37265f
committed by Linus Torvalds
 mm/rmap.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1214,7 +1214,7 @@ void do_page_add_anon_rmap(struct page *page,
 		 */
 		if (compound)
 			__inc_zone_page_state(page, NR_ANON_THPS);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+		__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, nr);
 	}
 	if (unlikely(PageKsm(page)))
 		return;
@@ -1258,7 +1258,7 @@ void page_add_new_anon_rmap(struct page *page,
 		/* increment count (starts at -1) */
 		atomic_set(&page->_mapcount, 0);
 	}
-	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr);
+	__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, nr);
 	__page_set_anon_rmap(page, vma, address, 1);
 }
 
@@ -1293,7 +1293,7 @@ void page_add_file_rmap(struct page *page, bool compound)
 		if (!atomic_inc_and_test(&page->_mapcount))
 			goto out;
 	}
-	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr);
+	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
 	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 out:
 	unlock_page_memcg(page);
@@ -1329,11 +1329,11 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 	}
 
 	/*
-	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, -nr);
+	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
 	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
 	if (unlikely(PageMlocked(page)))
@@ -1375,7 +1375,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
 		clear_page_mlock(page);
 
 	if (nr) {
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr);
+		__mod_node_page_state(page_pgdat(page), NR_ANON_PAGES, -nr);
 		deferred_split_huge_page(page);
 	}
 }
@@ -1404,7 +1404,7 @@ void page_remove_rmap(struct page *page, bool compound)
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__dec_zone_page_state(page, NR_ANON_PAGES);
+	__dec_node_page_state(page, NR_ANON_PAGES);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);