ANDROID: mm: add reclaim_shmem_address_space() for faster reclaims
Add functionality that allows users of shmem to reclaim its pages
without going through the kswapd/direct reclaim path. An example use
case: a device allocates a large amount of shmem pages and shares them
with hardware. To reclaim such pages faster, drivers can register
shrinkers and call reclaim_shmem_address_space(). The implementation of
this function is mostly borrowed from reclaim_address_space(),
implemented for per-process reclaim [1].

[1] https://lore.kernel.org/patchwork/cover/378056/

Bug: 187798288
Change-Id: I03d2c3b9610612af977f89ddeabb63b8e9e50918
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
Committed by Suren Baghdasaryan · parent 4c3dddf408 · commit daeabfe7fa
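
For context, the driver-side pattern the commit message describes might look
like the sketch below. This is an illustration only, not part of the patch:
the my_shmem_buf structure, its fields, and the function names are all
hypothetical, and it assumes the driver keeps its shmem buffer in a struct
file.

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/shrinker.h>

    struct my_shmem_buf {
    	struct file *filp;		/* shmem file backing the buffer */
    	unsigned long nr_pages;		/* pages currently backing it */
    	struct shrinker shrinker;
    };

    static unsigned long my_buf_count(struct shrinker *s,
    				  struct shrink_control *sc)
    {
    	struct my_shmem_buf *buf =
    		container_of(s, struct my_shmem_buf, shrinker);

    	return buf->nr_pages ? buf->nr_pages : SHRINK_EMPTY;
    }

    static unsigned long my_buf_scan(struct shrinker *s,
    				 struct shrink_control *sc)
    {
    	struct my_shmem_buf *buf =
    		container_of(s, struct my_shmem_buf, shrinker);
    	int reclaimed;

    	/* Hand the shmem mapping to the new export, bypassing kswapd. */
    	reclaimed = reclaim_shmem_address_space(buf->filp->f_mapping);
    	return reclaimed < 0 ? SHRINK_STOP : reclaimed;
    }

    static int my_buf_register_shrinker(struct my_shmem_buf *buf)
    {
    	buf->shrinker.count_objects = my_buf_count;
    	buf->shrinker.scan_objects = my_buf_scan;
    	buf->shrinker.seeks = DEFAULT_SEEKS;
    	return register_shrinker(&buf->shrinker);
    }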
@@ -3355,6 +3355,8 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
 extern int sysctl_nr_trim_pages;
 extern bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr);
+extern int reclaim_shmem_address_space(struct address_space *mapping);
+extern int reclaim_pages_from_list(struct list_head *page_list);
 
 /**
  * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
mm/shmem.c (+39 lines)
@@ -38,6 +38,7 @@
 #include <linux/hugetlb.h>
 #include <linux/frontswap.h>
 #include <linux/fs_parser.h>
+#include <linux/mm_inline.h>
 
 #include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
 
@@ -4290,3 +4291,41 @@ void shmem_mark_page_lazyfree(struct page *page)
 	mark_page_lazyfree_movetail(page);
 }
 EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);
+
+int reclaim_shmem_address_space(struct address_space *mapping)
+{
+	pgoff_t start = 0;
+	struct page *page;
+	LIST_HEAD(page_list);
+	int reclaimed;
+	XA_STATE(xas, &mapping->i_pages, start);
+
+	if (!shmem_mapping(mapping))
+		return -EINVAL;
+
+	lru_add_drain();
+
+	rcu_read_lock();
+	xas_for_each(&xas, page, ULONG_MAX) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (xa_is_value(page))
+			continue;
+		if (isolate_lru_page(page))
+			continue;
+
+		list_add(&page->lru, &page_list);
+		inc_node_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_lru(page));
+
+		if (need_resched()) {
+			xas_pause(&xas);
+			cond_resched_rcu();
+		}
+	}
+	rcu_read_unlock();
+	reclaimed = reclaim_pages_from_list(&page_list);
+
+	return reclaimed;
+}
+EXPORT_SYMBOL_GPL(reclaim_shmem_address_space);
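
As a usage note, a one-off caller might drive the new export as in the
hypothetical sketch below. shmem_file_setup() is the stock shmem allocator;
the buffer name, size, and function name are made up for illustration.

    #include <linux/mm.h>
    #include <linux/shmem_fs.h>
    #include <linux/sizes.h>

    static int my_drop_buffer_pages(void)
    {
    	struct file *filp;
    	int reclaimed;

    	filp = shmem_file_setup("my-hw-buf", SZ_16M, VM_NORESERVE);
    	if (IS_ERR(filp))
    		return PTR_ERR(filp);

    	/* ... populate the buffer and let the hardware use it ... */

    	/* Returns pages reclaimed, or -EINVAL for non-shmem mappings. */
    	reclaimed = reclaim_shmem_address_space(filp->f_mapping);

    	fput(filp);
    	return reclaimed;
    }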
mm/vmscan.c (+30 lines)
@@ -1562,6 +1562,36 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	return nr_reclaimed;
 }
 
+int reclaim_pages_from_list(struct list_head *page_list)
+{
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+		.priority = DEF_PRIORITY,
+		.may_writepage = 1,
+		.may_unmap = 1,
+		.may_swap = 1,
+	};
+	unsigned long nr_reclaimed;
+	struct reclaim_stat dummy_stat;
+	struct page *page;
+
+	list_for_each_entry(page, page_list, lru)
+		ClearPageActive(page);
+
+	nr_reclaimed = shrink_page_list(page_list, NULL, &sc,
+					&dummy_stat, false);
+	while (!list_empty(page_list)) {
+		page = lru_to_page(page_list);
+		list_del(&page->lru);
+		dec_node_page_state(page, NR_ISOLATED_ANON +
+					page_is_file_lru(page));
+		putback_lru_page(page);
+	}
+
+	return nr_reclaimed;
+}
+
 /*
  * Attempt to remove the specified page from its LRU. Only take this page
  * if it is of the appropriate PageActive status. Pages which are being