mm: memcontrol: switch to native NR_ANON_MAPPED counter
Memcg maintains a private MEMCG_RSS counter. This divergence from the generic VM accounting means unnecessary code overhead, and creates a dependency for memcg that page->mapping is set up at the time of charging, so that page types can be told apart.

Convert the generic accounting sites to mod_lruvec_page_state and friends to maintain the per-cgroup vmstat counter of NR_ANON_MAPPED. We use lock_page_memcg() to stabilize page->mem_cgroup during rmap changes, the same way we do for NR_FILE_MAPPED.

With the previous patch removing MEMCG_CACHE and the private NR_SHMEM counter, this patch finally eliminates the need to have page->mapping set up at charge time. However, we need to have page->mem_cgroup set up by the time rmap runs and does the accounting, so switch the commit and the rmap callbacks around.

v2: fix temporary accounting bug by switching rmap<->commit (Joonsoo)

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-11-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit be5d0a74c6
parent 0d1c20722a
 mm/rmap.c | 47 +++++++++++++++++++++++++++++------------------
 1 file changed, 29 insertions(+), 18 deletions(-)
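The pattern the diff below establishes in the rmap paths is: take lock_page_memcg() to pin page->mem_cgroup, update NR_ANON_MAPPED through __mod_lruvec_page_state() (which accounts to both the node and the page's memcg), then unlock. A minimal sketch of that pattern, for illustration only; the helper name is made up and is not code from the patch, while lock_page_memcg(), unlock_page_memcg() and __mod_lruvec_page_state() are the interfaces the diff uses:

#include <linux/memcontrol.h>
#include <linux/mm.h>

/*
 * Illustrative helper (hypothetical, not part of the patch):
 * lock_page_memcg() stabilizes page->mem_cgroup against charge
 * moving, so __mod_lruvec_page_state() can safely account the
 * delta to the per-cgroup lruvec as well as the node vmstat.
 * The irq-unsafe __ variant is sufficient because the rmap
 * callers hold the pte lock and these counters are never
 * modified from interrupt context.
 */
static void account_anon_mapped(struct page *page, int nr_pages)
{
        lock_page_memcg(page);
        __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr_pages);
        unlock_page_memcg(page);
}

In the patch itself the lock is held across the whole mapcount update rather than per counter, and page_remove_rmap() keeps it held across the file, compound and order-0 anon cases via a single out: label.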
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1114,6 +1114,11 @@ void do_page_add_anon_rmap(struct page *page,
         bool compound = flags & RMAP_COMPOUND;
         bool first;
 
+        if (unlikely(PageKsm(page)))
+                lock_page_memcg(page);
+        else
+                VM_BUG_ON_PAGE(!PageLocked(page), page);
+
         if (compound) {
                 atomic_t *mapcount;
                 VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -1134,12 +1139,13 @@ void do_page_add_anon_rmap(struct page *page,
                  */
                 if (compound)
                         __inc_node_page_state(page, NR_ANON_THPS);
-                __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
+                __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
         }
-        if (unlikely(PageKsm(page)))
-                return;
 
-        VM_BUG_ON_PAGE(!PageLocked(page), page);
+        if (unlikely(PageKsm(page))) {
+                unlock_page_memcg(page);
+                return;
+        }
 
         /* address might be in next vma when migration races vma_adjust */
         if (first)
@@ -1181,7 +1187,7 @@ void page_add_new_anon_rmap(struct page *page,
                 /* increment count (starts at -1) */
                 atomic_set(&page->_mapcount, 0);
         }
-        __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
+        __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
         __page_set_anon_rmap(page, vma, address, 1);
 }
 
@@ -1230,13 +1236,12 @@ static void page_remove_file_rmap(struct page *page, bool compound)
         int i, nr = 1;
 
         VM_BUG_ON_PAGE(compound && !PageHead(page), page);
-        lock_page_memcg(page);
 
         /* Hugepages are not counted in NR_FILE_MAPPED for now. */
         if (unlikely(PageHuge(page))) {
                 /* hugetlb pages are always mapped with pmds */
                 atomic_dec(compound_mapcount_ptr(page));
-                goto out;
+                return;
         }
 
         /* page still mapped by someone else? */
@@ -1246,14 +1251,14 @@ static void page_remove_file_rmap(struct page *page, bool compound)
                                 nr++;
                 }
                 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
-                        goto out;
+                        return;
                 if (PageSwapBacked(page))
                         __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
                 else
                         __dec_node_page_state(page, NR_FILE_PMDMAPPED);
         } else {
                 if (!atomic_add_negative(-1, &page->_mapcount))
-                        goto out;
+                        return;
         }
 
         /*
@@ -1265,8 +1270,6 @@ static void page_remove_file_rmap(struct page *page, bool compound)
 
         if (unlikely(PageMlocked(page)))
                 clear_page_mlock(page);
-out:
-        unlock_page_memcg(page);
 }
 
 static void page_remove_anon_compound_rmap(struct page *page)
@@ -1310,7 +1313,7 @@ static void page_remove_anon_compound_rmap(struct page *page)
                 clear_page_mlock(page);
 
         if (nr)
-                __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
+                __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
 }
 
 /**
@@ -1322,22 +1325,28 @@ static void page_remove_anon_compound_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page, bool compound)
 {
-        if (!PageAnon(page))
-                return page_remove_file_rmap(page, compound);
+        lock_page_memcg(page);
 
-        if (compound)
-                return page_remove_anon_compound_rmap(page);
+        if (!PageAnon(page)) {
+                page_remove_file_rmap(page, compound);
+                goto out;
+        }
+
+        if (compound) {
+                page_remove_anon_compound_rmap(page);
+                goto out;
+        }
 
         /* page still mapped by someone else? */
         if (!atomic_add_negative(-1, &page->_mapcount))
-                return;
+                goto out;
 
         /*
          * We use the irq-unsafe __{inc|mod}_zone_page_stat because
          * these counters are not modified in interrupt context, and
          * pte lock(a spinlock) is held, which implies preemption disabled.
          */
-        __dec_node_page_state(page, NR_ANON_MAPPED);
+        __dec_lruvec_page_state(page, NR_ANON_MAPPED);
 
         if (unlikely(PageMlocked(page)))
                 clear_page_mlock(page);
@@ -1354,6 +1363,8 @@ void page_remove_rmap(struct page *page, bool compound)
          * Leaving it set also helps swapoff to reinstate ptes
          * faster for those pages still in swapcache.
          */
+out:
+        unlock_page_memcg(page);
 }
 
 /*