Revert "FROMLIST: mm: replace migrate_prep with lru_add_drain_all"

This reverts commit 134ac2d4dc.

Bug: 180018981
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: I579c72398447f770c235e4fefd246f1dbbd250d0
This commit is contained in:
Minchan Kim
2021-03-19 12:34:42 -07:00
committed by Suren Baghdasaryan
parent 7ed55d7025
commit d3c1bf42af
6 changed files with 32 additions and 11 deletions

View File

@@ -45,6 +45,8 @@ extern struct page *alloc_migration_target(struct page *page, unsigned long priv
/* CONFIG_MIGRATION: public page-migration API. */
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
/* Drain per-CPU LRU caches on all CPUs before building an isolation list. */
extern void migrate_prep(void);
/* Like migrate_prep(), but drains only the calling CPU's LRU caches. */
extern void migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -64,6 +66,9 @@ static inline struct page *alloc_migration_target(struct page *page,
/* !CONFIG_MIGRATION stub: isolation always fails when migration is compiled out. */
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return -EBUSY; }
/* !CONFIG_MIGRATION stubs: migration is not supported in this configuration. */
/*
 * NOTE(review): these stubs return int (-ENOSYS) while the
 * CONFIG_MIGRATION declarations above are `extern void` — callers
 * cannot portably use the return value; confirm this mismatch is
 * intentional against the reverted commit.
 */
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
/* !CONFIG_MIGRATION stub: no page state to transfer; deliberately a no-op. */
static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

View File

@@ -2275,8 +2275,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
cc->free_pfn, end_pfn, sync);
/* lru_add_drain_all could be expensive with involving other CPUs */
lru_add_drain();
migrate_prep_local();
while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
int err;

View File

@@ -1127,7 +1127,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
int err = 0;
nodemask_t tmp;
lru_add_drain_all();
migrate_prep();
mmap_read_lock(mm);
@@ -1326,7 +1326,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
lru_add_drain_all();
migrate_prep();
}
{
NODEMASK_SCRATCH(scratch);

View File

@@ -57,6 +57,28 @@
#include "internal.h"
/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
void migrate_prep(void)
{
/*
 * Clear the LRU lists so pages can be isolated.
 * Note that pages may be moved off the LRU after we have
 * drained them. Those pages will fail to migrate like other
 * pages that may be busy.
 */
lru_add_drain_all();
}
/*
 * Do the necessary work of migrate_prep() but avoid scheduling work on
 * other CPUs: drain only the local CPU's LRU caches.
 */
void migrate_prep_local(void)
{
lru_add_drain();
}
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
struct address_space *mapping;
@@ -1697,7 +1719,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
int start, i;
int err = 0, err1;
lru_add_drain_all();
migrate_prep();
for (i = start = 0; i < nr_pages; i++) {
const void __user *p;

View File

@@ -8520,7 +8520,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
if (cc->alloc_contig && cc->mode == MIGRATE_ASYNC)
max_tries = 1;
lru_add_drain_all();
migrate_prep();
while (pfn < end || !list_empty(&cc->migratepages)) {
if (fatal_signal_pending(current)) {

View File

@@ -753,11 +753,6 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
}
/*
* lru_add_drain_all() usually needs to be called before we start compiling
* a list of pages to be migrated using isolate_lru_page(). Note that pages
* may be moved off the LRU after we have drained them. Those pages will
* fail to migrate like other pages that may be busy.
*
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
* kworkers being shut down before our page_alloc_cpu_dead callback is
* executed on the offlined cpu.