ANDROID: implement wrapper for reverse migration
Reverse migration is used to balance the occupancy of memory zones within a node when the zones have been left imbalanced by an operation that migrates pages elsewhere, e.g. hotremoving a memory block and then hotadding the same memory. In that case the newly hotadded memory contains a lot of free memory, which can be refilled with the pages that were migrated out of it (as part of offline/hotremove), thereby relieving pressure on the node's other zones.

Upstream discussion: https://lore.kernel.org/patchwork/cover/1382106/

Change-Id: Ib3137dab0db66ecf6858c4077dcadb9dfd0c6b1c
Bug: 175403896
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
Committed by: Suren Baghdasaryan
Parent: ea527a52d1
Commit: 8cd9aa93b7
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -182,6 +182,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 extern int kcompactd_run(int nid);
 extern void kcompactd_stop(int nid);
 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
+extern unsigned long isolate_and_split_free_page(struct page *page,
+					struct list_head *list);
 
 #else
 static inline void reset_isolation_suitable(pg_data_t *pgdat)
@@ -236,6 +238,12 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat,
 {
 }
 
+static inline unsigned long isolate_and_split_free_page(struct page *page,
+					struct list_head *list)
+{
+	return 0;
+}
+
 #endif /* CONFIG_COMPACTION */
 
 struct node;
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1014,6 +1014,7 @@ extern struct pglist_data contig_page_data;
 extern struct pglist_data *first_online_pgdat(void);
 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
 extern struct zone *next_zone(struct zone *zone);
+extern int isolate_anon_lru_page(struct page *page);
 
 /**
  * for_each_online_pgdat - helper macro to iterate over all online nodes
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -763,6 +763,29 @@ isolate_freepages_range(struct compact_control *cc,
 	return pfn;
 }
 
+unsigned long isolate_and_split_free_page(struct page *page,
+					struct list_head *list)
+{
+	unsigned long isolated;
+	unsigned int order;
+
+	if (!PageBuddy(page))
+		return 0;
+
+	order = buddy_order(page);
+	isolated = __isolate_free_page(page, order);
+	if (!isolated)
+		return 0;
+
+	set_page_private(page, order);
+	list_add(&page->lru, list);
+
+	split_map_pages(list);
+
+	return isolated;
+}
+EXPORT_SYMBOL_GPL(isolate_and_split_free_page);
+
 /* Similar to reclaim, but different enough that they don't share logic */
 static bool too_many_isolated(pg_data_t *pgdat)
 {
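As a rough illustration of how a reverse-migration driver might consume this export, here is a minimal sketch; grab_free_block() and its pfn-based lookup are assumptions for illustration, not part of this patch, and serialization against the page allocator is elided:

#include <linux/compaction.h>
#include <linux/memory_hotplug.h>

/*
 * Hypothetical helper: try to take the free buddy block containing @pfn
 * out of the allocator and collect its order-0 pages on @list, e.g. to
 * use them as migration targets in a newly hotadded zone.  Returns the
 * number of base pages isolated, or 0 if the page is not a free buddy
 * page (or another CPU allocated it first).
 */
static unsigned long grab_free_block(unsigned long pfn, struct list_head *list)
{
	struct page *page = pfn_to_online_page(pfn);

	if (!page)
		return 0;

	/*
	 * The PageBuddy() check inside is inherently racy; a return of 0
	 * just means "try the next block", not a hard error.
	 */
	return isolate_and_split_free_page(page, list);
}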
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -172,6 +172,7 @@ void putback_movable_pages(struct list_head *l)
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(putback_movable_pages);
 
 /*
  * Restore a potential migration pte to a working pte entry
@@ -1509,6 +1510,7 @@ out:
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(migrate_pages);
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
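With migrate_pages() now visible to modules, a caller supplies a new_page_t/free_page_t pair. A minimal sketch of such a pair follows; the names reverse_mig_alloc/reverse_mig_free and the use of the target node id as the private cookie are assumptions for illustration:

#include <linux/migrate.h>
#include <linux/gfp.h>

/* new_page_t callback: allocate the replacement page on the node passed
 * via @private (the target node id, cast through unsigned long) */
static struct page *reverse_mig_alloc(struct page *page, unsigned long private)
{
	return alloc_pages_node((int)private, GFP_KERNEL | __GFP_MOVABLE, 0);
}

/* free_page_t callback: migrate_pages() hands back target pages it
 * allocated but did not end up using */
static void reverse_mig_free(struct page *page, unsigned long private)
{
	__free_page(page);
}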
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -537,6 +537,24 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 {
 	return __get_pfnblock_flags_mask(page, pfn, mask);
 }
+EXPORT_SYMBOL_GPL(get_pfnblock_flags_mask);
+
+int isolate_anon_lru_page(struct page *page)
+{
+	int ret;
+
+	if (!PageLRU(page) || !PageAnon(page))
+		return -EINVAL;
+
+	if (!get_page_unless_zero(page))
+		return -EINVAL;
+
+	ret = isolate_lru_page(page);
+	put_page(page);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(isolate_anon_lru_page);
+
 static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
 {
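Putting the exported pieces together, a reverse-migration step might look roughly like the sketch below, reusing the hypothetical reverse_mig_alloc()/reverse_mig_free() callbacks from above; reverse_migrate_one() and the choice of MIGRATE_SYNC/MR_MEMORY_HOTPLUG are illustrative, not mandated by the patch:

#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Hypothetical: move one anonymous LRU page to @target_nid using the
 * wrappers exported by this patch.
 */
static int reverse_migrate_one(struct page *page, int target_nid)
{
	LIST_HEAD(pages);
	int ret;

	/* checks PageLRU/PageAnon; on success (0) the page is off the
	 * LRU and we hold a reference to it */
	ret = isolate_anon_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &pages);

	/* returns 0 on full success, otherwise an error or the number
	 * of pages that could not be migrated */
	ret = migrate_pages(&pages, reverse_mig_alloc, reverse_mig_free,
			    (unsigned long)target_nid, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG);
	if (ret)
		/* anything left unmigrated goes back to the LRU */
		putback_movable_pages(&pages);

	return ret;
}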