mm: memcontrol: switch to native NR_FILE_PAGES and NR_SHMEM counters
Memcg maintains private MEMCG_CACHE and NR_SHMEM counters.  This
divergence from the generic VM accounting means unnecessary code
overhead, and creates a dependency for memcg that page->mapping is set
up at the time of charging, so that page types can be told apart.

Convert the generic accounting sites to mod_lruvec_page_state and
friends to maintain the per-cgroup vmstat counters of NR_FILE_PAGES
and NR_SHMEM.  The page is already locked in these places, so
page->mem_cgroup is stable; we only need minimal tweaks of two
mem_cgroup_migrate() calls to ensure it's set up in time.

Then replace MEMCG_CACHE with NR_FILE_PAGES and delete the private
NR_SHMEM accounting sites.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-10-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
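The key to the conversion is that a native lruvec update fans out to
every level of accounting at once.  Roughly, __mod_lruvec_page_state()
behaves like the condensed sketch below; this is simplified from the
kernel sources of this era, with compound-page handling and the
recursive parent-cgroup updates inside __mod_lruvec_state() omitted:

/*
 * Condensed sketch of __mod_lruvec_page_state() (not the verbatim
 * kernel source): a single call covers the node-level vmstat counter
 * *and* the owning cgroup's counter, which is why the call sites in
 * the diff below can drop their separate per-cgroup accounting.
 */
static void __mod_lruvec_page_state(struct page *page,
				    enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Uncharged pages have no lruvec; update only the node counter. */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	/* Charged pages: update node and memcg counters in one go. */
	lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
}

This is also why the changelog leans on the page lock: it keeps
page->mem_cgroup stable for the duration of the counter update.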
---
 mm/filemap.c | 17 +++++++++--------
@@ -199,9 +199,9 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 
 	nr = hpage_nr_pages(page);
 
-	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
+	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
 	if (PageSwapBacked(page)) {
-		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
+		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
 		if (PageTransHuge(page))
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else if (PageTransHuge(page)) {
@@ -802,21 +802,22 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 	new->mapping = mapping;
 	new->index = offset;
 
+	mem_cgroup_migrate(old, new);
+
 	xas_lock_irqsave(&xas, flags);
 	xas_store(&xas, new);
 
 	old->mapping = NULL;
 	/* hugetlb pages do not participate in page cache accounting. */
 	if (!PageHuge(old))
-		__dec_node_page_state(old, NR_FILE_PAGES);
+		__dec_lruvec_page_state(old, NR_FILE_PAGES);
 	if (!PageHuge(new))
-		__inc_node_page_state(new, NR_FILE_PAGES);
+		__inc_lruvec_page_state(new, NR_FILE_PAGES);
 	if (PageSwapBacked(old))
-		__dec_node_page_state(old, NR_SHMEM);
+		__dec_lruvec_page_state(old, NR_SHMEM);
 	if (PageSwapBacked(new))
-		__inc_node_page_state(new, NR_SHMEM);
+		__inc_lruvec_page_state(new, NR_SHMEM);
 	xas_unlock_irqrestore(&xas, flags);
-	mem_cgroup_migrate(old, new);
 	if (freepage)
 		freepage(old);
 	put_page(old);
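The relocation of mem_cgroup_migrate() in this hunk is one of the two
"minimal tweaks" the changelog mentions: unlike the old node-only
helpers, __dec_lruvec_page_state() and __inc_lruvec_page_state()
resolve their lruvec through the page's mem_cgroup, so the charge must
already be transferred to the new page by the time the NR_FILE_PAGES
and NR_SHMEM updates run, rather than after the xarray is unlocked as
before.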
@@ -867,7 +868,7 @@ static int __add_to_page_cache_locked(struct page *page,
 
 		/* hugetlb pages do not participate in page cache accounting */
 		if (!huge)
-			__inc_node_page_state(page, NR_FILE_PAGES);
+			__inc_lruvec_page_state(page, NR_FILE_PAGES);
 unlock:
 		xas_unlock_irq(&xas);
 	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
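The mm/memcontrol.c side of the same patch (not included in this
file's view) performs the substitution the changelog describes.  A
sketch of the flavor of that change, under the assumption that the
surrounding code matches the v5.8-era memory_stat_format():

	/* before: read the private memcg-only counter */
	seq_buf_printf(&s, "file %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_CACHE) * PAGE_SIZE);

	/* after: read the native vmstat counter kept in sync above */
	seq_buf_printf(&s, "file %llu\n",
		       (u64)memcg_page_state(memcg, NR_FILE_PAGES) * PAGE_SIZE);

The user-visible cgroup2 memory.stat keys ("file", "shmem") do not
change; only the counters backing them do.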