ANDROID: mm: provision to add shmem pages to inactive file lru head

Commit 9975da5f43 ("ANDROID: mm: allow fast reclaim of shmem pages")
allows pages to be added only to the tail of the inactive file LRU,
for faster reclaim. Extend this so that pages can also be added to the
head of the inactive file LRU, letting users selectively place shmem
file pages at either the head or the tail of the inactive file LRU.
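
With this, a caller can choose per page where the lazyfree page is
queued. For example (illustrative only; 'page' stands for a shmem
page the caller holds a reference on):

	/* reclaim fast: queue at the tail of the inactive file LRU */
	shmem_mark_page_lazyfree(page, true);

	/* age normally: queue at the head of the inactive file LRU */
	shmem_mark_page_lazyfree(page, false);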

Bug: 187798288
Change-Id: Icf167e1e3ea68257291478e1f16de678ecbf6320
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
Author:    Charan Teja Reddy <charante@codeaurora.org>
Date:      2021-06-25 12:30:50 +05:30
Committer: Todd Kjos
Commit:    8011eb2215
Parent:    9bb1247653

4 changed files with 12 additions and 7 deletions

--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h

@@ -85,7 +85,7 @@ extern bool shmem_huge_enabled(struct vm_area_struct *vma);
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
-extern void shmem_mark_page_lazyfree(struct page *page);
+extern void shmem_mark_page_lazyfree(struct page *page, bool tail);
 
 /* Flag allocation requirements to shmem_getpage */
 enum sgp_type {

--- a/include/linux/swap.h
+++ b/include/linux/swap.h

@@ -353,7 +353,7 @@ extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
-extern void mark_page_lazyfree_movetail(struct page *page);
+extern void mark_page_lazyfree_movetail(struct page *page, bool tail);
 extern void swap_setup(void);
 extern void __lru_cache_add_inactive_or_unevictable(struct page *page,

--- a/mm/shmem.c
+++ b/mm/shmem.c

@@ -4295,9 +4295,9 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
 
-void shmem_mark_page_lazyfree(struct page *page)
+void shmem_mark_page_lazyfree(struct page *page, bool tail)
 {
-	mark_page_lazyfree_movetail(page);
+	mark_page_lazyfree_movetail(page, tail);
 }
 EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);

--- a/mm/swap.c
+++ b/mm/swap.c

@@ -634,6 +634,8 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
 				     void *arg)
 {
+	bool *add_to_tail = (bool *)arg;
+
 	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page)) {
 		bool active = PageActive(page);
@@ -642,7 +644,10 @@ static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
 				   LRU_INACTIVE_ANON + active);
 		ClearPageActive(page);
 		ClearPageReferenced(page);
-		add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+		if (add_to_tail && *add_to_tail)
+			add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+		else
+			add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 	}
 }
@@ -769,7 +774,7 @@ void mark_page_lazyfree(struct page *page)
  * mark_page_lazyfree_movetail() moves @page to the tail of inactive file list.
  * This is done to accelerate the reclaim of @page.
  */
-void mark_page_lazyfree_movetail(struct page *page)
+void mark_page_lazyfree_movetail(struct page *page, bool tail)
 {
@@ -780,7 +785,7 @@ void mark_page_lazyfree_movetail(struct page *page)
 		get_page(page);
 		if (pagevec_add_and_need_flush(pvec, page))
 			pagevec_lru_move_fn(pvec,
-					lru_lazyfree_movetail_fn, NULL);
+					lru_lazyfree_movetail_fn, &tail);
 		local_unlock(&lru_pvecs.lock);
 	}
 }
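
Note: passing the address of the on-stack 'tail' to
pagevec_lru_move_fn() is safe because the pagevec is drained
synchronously before mark_page_lazyfree_movetail() returns; pages
drained later (e.g. from the lru_add_drain() path) see a NULL arg,
which is why lru_lazyfree_movetail_fn() NULL-checks add_to_tail and
falls back to the head of the inactive file LRU.

A minimal caller sketch (hypothetical driver code, not part of this
commit; assumes the caller holds a reference on each shmem page):

	#include <linux/shmem_fs.h>

	/*
	 * Hint that these shmem pages are cheap to reclaim. With
	 * fast_reclaim == true they are queued at the tail of the
	 * inactive file LRU (scanned first); otherwise at the head.
	 */
	static void hint_shmem_lazyfree(struct page **pages, int nr,
					bool fast_reclaim)
	{
		int i;

		for (i = 0; i < nr; i++)
			shmem_mark_page_lazyfree(pages[i], fast_reclaim);
	}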