mm: code cleanup for MADV_FREE
Some comments for MADV_FREE are revised and added to help people understand the MADV_FREE code, especially the page flag PG_swapbacked. This leaves page_is_file_cache() inconsistent with its comments, so the function is renamed to page_is_file_lru() to make them consistent again. All of this is put in one patch as one logical change.

Suggested-by: David Hildenbrand <david@redhat.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: David Rientjes <rientjes@google.com>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@kernel.org>
Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@surriel.com>
Link: http://lkml.kernel.org/r/20200317100342.2730705-1-ying.huang@intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9de4f22a60
parent 7a9547fd4e
 mm/vmscan.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
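For context on the rename, the helper in question is a one-line test on the PG_swapbacked flag: pages that are not swap-backed sit on the file LRU lists, so anonymous pages whose PG_swapbacked was cleared by MADV_FREE also test as "file" here even though they are not page cache. Below is a minimal sketch of the renamed helper, assuming the v5.6-era one-liner from include/linux/mm_inline.h; the comment wording is illustrative, not quoted from the patch.

#include <linux/mm_types.h>	/* struct page */
#include <linux/page-flags.h>	/* PageSwapBacked() */

/* Sketch: does this page live on a file LRU list? */
static inline int page_is_file_lru(struct page *page)
{
	/*
	 * MADV_FREE clears PG_swapbacked on clean anonymous pages, so
	 * they sit on the file LRU and test as "file" here even though
	 * they are not page cache. That is why the old name,
	 * page_is_file_cache(), was misleading.
	 */
	return !PageSwapBacked(page);
}

Every caller updated in the diff below is really asking "which LRU list is this page on?", not "is this page in the page cache?", which is what motivates the rename.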
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -919,7 +919,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		 * exceptional entries and shadow exceptional entries in the
 		 * same address_space.
 		 */
-		if (reclaimed && page_is_file_cache(page) &&
+		if (reclaimed && page_is_file_lru(page) &&
 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
 			shadow = workingset_eviction(page, target_memcg);
 		__delete_from_page_cache(page, shadow);
@@ -1043,7 +1043,7 @@ static void page_check_dirty_writeback(struct page *page,
 	 * Anonymous pages are not handled by flushers and must be written
 	 * from reclaim context. Do not stall reclaim based on them
 	 */
-	if (!page_is_file_cache(page) ||
+	if (!page_is_file_lru(page) ||
 	    (PageAnon(page) && !PageSwapBacked(page))) {
 		*dirty = false;
 		*writeback = false;
@@ -1315,7 +1315,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * the rest of the LRU for clean pages and see
 				 * the same dirty pages again (PageReclaim).
 				 */
-				if (page_is_file_cache(page) &&
+				if (page_is_file_lru(page) &&
 				    (!current_is_kswapd() || !PageReclaim(page) ||
 				     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
 					/*
@@ -1459,7 +1459,7 @@ activate_locked:
 			try_to_free_swap(page);
 		VM_BUG_ON_PAGE(PageActive(page), page);
 		if (!PageMlocked(page)) {
-			int type = page_is_file_cache(page);
+			int type = page_is_file_lru(page);
 			SetPageActive(page);
 			stat->nr_activate[type] += nr_pages;
 			count_memcg_page_event(page, PGACTIVATE);
@@ -1497,7 +1497,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	LIST_HEAD(clean_pages);
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
-		if (page_is_file_cache(page) && !PageDirty(page) &&
+		if (page_is_file_lru(page) && !PageDirty(page) &&
 		    !__PageMovable(page) && !PageUnevictable(page)) {
 			ClearPageActive(page);
 			list_move(&page->lru, &clean_pages);
@@ -2053,7 +2053,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 		 * IO, plus JVM can create lots of anon VM_EXEC pages,
 		 * so we ignore them here.
 		 */
-		if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
+		if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
 			list_add(&page->lru, &l_active);
 			continue;
 		}