diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 51b1dcfb5022..1bf2615449fe 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -85,7 +85,7 @@ extern bool shmem_huge_enabled(struct vm_area_struct *vma);
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
-extern void shmem_mark_page_lazyfree(struct page *page);
+extern void shmem_mark_page_lazyfree(struct page *page, bool tail);
 
 /* Flag allocation requirements to shmem_getpage */
 enum sgp_type {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f750b27773ea..beda0a50d0b9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -353,7 +353,7 @@ extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
-extern void mark_page_lazyfree_movetail(struct page *page);
+extern void mark_page_lazyfree_movetail(struct page *page, bool tail);
 extern void swap_setup(void);
 
 extern void __lru_cache_add_inactive_or_unevictable(struct page *page,
diff --git a/mm/shmem.c b/mm/shmem.c
index efcffd4836c3..604c6d89d243 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4295,9 +4295,9 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
 
-void shmem_mark_page_lazyfree(struct page *page)
+void shmem_mark_page_lazyfree(struct page *page, bool tail)
 {
-	mark_page_lazyfree_movetail(page);
+	mark_page_lazyfree_movetail(page, tail);
 }
 EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);
 
diff --git a/mm/swap.c b/mm/swap.c
index 09923999ddc2..8d5c61de5a6e 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -634,6 +634,8 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
 				     void *arg)
 {
+	bool *add_to_tail = (bool *)arg;
+
 	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page)) {
 		bool active = PageActive(page);
@@ -642,7 +644,10 @@ static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
 				   LRU_INACTIVE_ANON + active);
 		ClearPageActive(page);
 		ClearPageReferenced(page);
-		add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+		if (add_to_tail && *add_to_tail)
+			add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+		else
+			add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 	}
 }
 
@@ -769,7 +774,7 @@ void mark_page_lazyfree(struct page *page)
  * mark_page_lazyfree_movetail() moves @page to the tail of inactive file list.
  * This is done to accelerate the reclaim of @page.
  */
-void mark_page_lazyfree_movetail(struct page *page)
+void mark_page_lazyfree_movetail(struct page *page, bool tail)
 {
 	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page)) {
@@ -780,7 +785,7 @@ void mark_page_lazyfree_movetail(struct page *page)
 		get_page(page);
 		if (pagevec_add_and_need_flush(pvec, page))
 			pagevec_lru_move_fn(pvec,
-					lru_lazyfree_movetail_fn, NULL);
+					lru_lazyfree_movetail_fn, &tail);
 		local_unlock(&lru_pvecs.lock);
 	}
 }
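
For context, a minimal caller sketch under stated assumptions: only shmem_mark_page_lazyfree() and its new bool argument come from the diff above; the function example_mark_lazyfree() and its fast_reclaim flag are hypothetical names used purely for illustration.

/*
 * Hypothetical caller sketch -- NOT part of this patch.  Only
 * shmem_mark_page_lazyfree() and its new "tail" parameter are taken
 * from the diff; this wrapper and "fast_reclaim" are illustrative.
 */
#include <linux/shmem_fs.h>

static void example_mark_lazyfree(struct page *page, bool fast_reclaim)
{
	/*
	 * tail == true keeps the pre-patch behaviour: the page is moved
	 * to the tail of the inactive file list so it is reclaimed
	 * sooner.  tail == false takes the new path and adds the page
	 * at the head via add_page_to_lru_list(), so it ages normally.
	 */
	shmem_mark_page_lazyfree(page, fast_reclaim);
}

Note the "add_to_tail && *add_to_tail" guard in lru_lazyfree_movetail_fn(): the flag is honoured only when the pagevec is flushed with a non-NULL arg, presumably because other drain paths still invoke the move function with NULL and then fall back to head placement.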