ANDROID: implement wrapper for reverse migration

Reverse migration is used to balance the occupancy of the memory zones
of a node whose imbalance may have been caused by the migration of pages
to other zones by an operation such as hotremove followed by hotadding
the same memory. In this case there is a lot of free memory in the newly
hotadded region, which can be refilled with the pages that were
previously migrated away (as part of offline/hotremove), thus relieving
pressure in the other zones of the node.

Upstream discussion: https://lore.kernel.org/patchwork/cover/1382106/

Change-Id: Ib3137dab0db66ecf6858c4077dcadb9dfd0c6b1c
Bug: 175403896
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
This commit is contained in:
Charan Teja Reddy
2021-02-16 13:59:45 +05:30
committed by Suren Baghdasaryan
parent ea527a52d1
commit 8cd9aa93b7
5 changed files with 52 additions and 0 deletions

View File

@@ -182,6 +182,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
extern unsigned long isolate_and_split_free_page(struct page *page,
struct list_head *list);
#else
static inline void reset_isolation_suitable(pg_data_t *pgdat)
@@ -236,6 +238,12 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat,
{
}
/*
 * !CONFIG_COMPACTION stub: report that no pages could be isolated.
 *
 * Must be "static inline" like the other stubs in this #else branch
 * (e.g. reset_isolation_suitable() above); a plain "static" function
 * defined in a header emits a private, unused copy — and a
 * -Wunused-function warning — in every translation unit including it.
 */
static inline unsigned long isolate_and_split_free_page(struct page *page,
struct list_head *list)
{
return 0;
}
#endif /* CONFIG_COMPACTION */
struct node;

View File

@@ -1014,6 +1014,7 @@ extern struct pglist_data contig_page_data;
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
extern int isolate_anon_lru_page(struct page *page);
/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes

View File

@@ -763,6 +763,29 @@ isolate_freepages_range(struct compact_control *cc,
return pfn;
}
/*
 * isolate_and_split_free_page - take a free (buddy) page out of the buddy
 * allocator and split it into order-0 pages on @list.
 * @page: candidate page; must still be owned by the buddy allocator.
 * @list: list head onto which the resulting base pages are linked.
 *
 * Returns the value reported by __isolate_free_page() — nonzero on
 * successful isolation — or 0 if @page is not a buddy page or isolation
 * failed.
 */
unsigned long isolate_and_split_free_page(struct page *page,
struct list_head *list)
{
unsigned long isolated;
unsigned int order;
/* Only pages currently in the buddy allocator can be isolated. */
if (!PageBuddy(page))
return 0;
order = buddy_order(page);
isolated = __isolate_free_page(page, order);
if (!isolated)
return 0;
/*
 * Record the order in page_private before queueing the page;
 * split_map_pages() presumably reads it back from there to know how
 * many order-0 pages to produce — TODO confirm against its definition.
 */
set_page_private(page, order);
list_add(&page->lru, list);
split_map_pages(list);
return isolated;
}
EXPORT_SYMBOL_GPL(isolate_and_split_free_page);
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{

View File

@@ -172,6 +172,7 @@ void putback_movable_pages(struct list_head *l)
}
}
}
EXPORT_SYMBOL_GPL(putback_movable_pages);
/*
* Restore a potential migration pte to a working pte entry * Restore a potential migration pte to a working pte entry
@@ -1509,6 +1510,7 @@ out:
return rc;
}
EXPORT_SYMBOL_GPL(migrate_pages);
struct page *alloc_migration_target(struct page *page, unsigned long private)
{

View File

@@ -537,6 +537,24 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
{
return __get_pfnblock_flags_mask(page, pfn, mask);
}
EXPORT_SYMBOL_GPL(get_pfnblock_flags_mask);
/*
 * isolate_anon_lru_page - isolate an anonymous page from its LRU list.
 * @page: the page to isolate.
 *
 * Returns -EINVAL if @page is not an anonymous page on an LRU list (or
 * is already being freed); otherwise returns the result of
 * isolate_lru_page().
 */
int isolate_anon_lru_page(struct page *page)
{
int ret;
if (!PageLRU(page) || !PageAnon(page))
return -EINVAL;
/*
 * Pin the page so it cannot be freed while we isolate it; bail out
 * if its refcount has already dropped to zero (page is being freed).
 */
if (!get_page_unless_zero(page))
return -EINVAL;
ret = isolate_lru_page(page);
/*
 * Drop our temporary pin. NOTE(review): on success the caller
 * presumably holds the reference taken by isolate_lru_page() —
 * confirm against isolate_lru_page()'s contract.
 */
put_page(page);
return ret;
}
EXPORT_SYMBOL_GPL(isolate_anon_lru_page);
static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{