ANDROID: implement wrapper for reverse migration
Reverse migration is used to balance the occupancy of the memory zones in a node when that occupancy has been left imbalanced by an operation that migrated pages to other zones, e.g. hot-removing a block of memory and then hot-adding the same memory back. In this case the newly hot-added memory is mostly free and can be refilled with the pages that were previously migrated out of it (as part of the offline/hot-remove step), which relieves pressure on the other zones of the node.

Upstream discussion: https://lore.kernel.org/patchwork/cover/1382106/

Change-Id: Ib3137dab0db66ecf6858c4077dcadb9dfd0c6b1c
Bug: 175403896
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
Committed by: Suren Baghdasaryan
Parent: ea527a52d1
Commit: 8cd9aa93b7
@@ -182,6 +182,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 extern int kcompactd_run(int nid);
 extern void kcompactd_stop(int nid);
 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
+extern unsigned long isolate_and_split_free_page(struct page *page,
+					struct list_head *list);
 
 #else
 static inline void reset_isolation_suitable(pg_data_t *pgdat)
@@ -236,6 +238,12 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat,
 {
 }
 
+static unsigned long isolate_and_split_free_page(struct page *page,
+					struct list_head *list)
+{
+	return 0;
+}
+
 #endif /* CONFIG_COMPACTION */
 
 struct node;
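For context, a minimal sketch of how a caller might pair the new wrapper with migrate_pages() to refill hot-added memory with previously migrated pages. This is illustrative only and not part of the patch: reverse_migrate(), reverse_migration_target() and reverse_migration_putback() are invented names, and isolate_and_split_free_page() is assumed to isolate a free buddy page, split it into order-0 pages on the given list, and return the number of pages isolated (0 on failure).

/*
 * Hypothetical usage sketch, not part of this patch.  All names except
 * isolate_and_split_free_page(), migrate_pages() and the core list/page
 * helpers are invented for illustration.
 */
#include <linux/compaction.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/list.h>

/* Order-0 free pages isolated from the newly hot-added memory. */
static LIST_HEAD(target_free_pages);

/* new_page_t callback: hand out one isolated target page per migrated page. */
static struct page *reverse_migration_target(struct page *page,
					     unsigned long private)
{
	struct page *dst;

	if (list_empty(&target_free_pages))
		return NULL;	/* treated as an allocation failure */

	dst = list_first_entry(&target_free_pages, struct page, lru);
	list_del(&dst->lru);
	return dst;
}

/* free_page_t callback: put back a target page that was not consumed. */
static void reverse_migration_putback(struct page *page, unsigned long private)
{
	list_add(&page->lru, &target_free_pages);
}

/*
 * Isolate free pages from the hot-added pfn range and migrate the pages on
 * @migrate_list into them.  Validation and locking of the pfn range are
 * assumed to be handled by the caller.
 */
static int reverse_migrate(struct list_head *migrate_list,
			   unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long pfn = start_pfn;

	while (pfn < start_pfn + nr_pages) {
		struct page *page = pfn_to_page(pfn);
		unsigned long isolated = 0;

		/* Only free (buddy) pages can be isolated and split. */
		if (PageBuddy(page))
			isolated = isolate_and_split_free_page(page,
							&target_free_pages);
		pfn += isolated ? isolated : 1;
	}

	return migrate_pages(migrate_list, reverse_migration_target,
			     reverse_migration_putback, 0,
			     MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
}

Handing out pre-isolated order-0 pages through the new_page_t callback is what would let a reverse-migration path choose the destination memory explicitly, instead of going through the page allocator's normal zone fallback order.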