Revert "BACKPORT: FROMLIST: mm: disable LRU pagevec during the migration temporarily"

This reverts commit 3039d8580c.

Bug: 180018981
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: Iadadd7f4c7aafa439afd7a1c3e575e5e31602bef
Author:    Minchan Kim
Date:      2021-03-19 12:34:33 -07:00
Committer: Suren Baghdasaryan
Commit:    7ed55d7025 (parent 9e2790466f)
5 changed files with 17 additions and 77 deletions
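
For readers coming to this revert cold: the change being backed out replaced the one-shot lru_add_drain_all() call in the migration paths with a lru_cache_disable()/lru_cache_enable() pair that drains the per-CPU pagevecs and keeps them bypassed for the whole operation. Below is a minimal sketch of the two calling patterns, matching the hunks that follow; migrate_isolated_pages() is a hypothetical stand-in for the real migration step in callers such as do_mbind() and do_pages_move(), not an actual kernel function.

/*
 * Illustrative sketch only -- not part of this commit.
 * lru_cache_disable(), lru_cache_enable() and lru_add_drain_all() are the
 * real interfaces touched by the hunks below; migrate_isolated_pages() is
 * a hypothetical helper standing in for the actual migration step.
 */
#include <linux/list.h>
#include <linux/swap.h>

extern int migrate_isolated_pages(struct list_head *pagelist); /* hypothetical */

/* Calling pattern removed by this revert: pagevecs stay bypassed throughout. */
static int move_pages_pagevec_disabled(struct list_head *pagelist)
{
	int err;

	lru_cache_disable();	/* drain per-CPU pagevecs and keep new pages off them */
	err = migrate_isolated_pages(pagelist);
	lru_cache_enable();	/* restore per-CPU pagevec batching */
	return err;
}

/* Calling pattern restored by this revert: a single up-front drain. */
static int move_pages_pagevec_drained(struct list_head *pagelist)
{
	int err;

	lru_add_drain_all();	/* flush pagevecs once; they may refill during migration */
	err = migrate_isolated_pages(pagelist);
	return err;
}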

include/linux/swap.h

@@ -341,9 +341,6 @@ extern void lru_cache_add(struct page *);
 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			       struct lruvec *lruvec, struct list_head *head);
 extern void mark_page_accessed(struct page *);
-extern void lru_cache_disable(void);
-extern void lru_cache_enable(void);
-extern bool lru_cache_disabled(void);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);

mm/memory_hotplug.c

@@ -1504,7 +1504,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
			 !IS_ALIGNED(start_pfn | nr_pages, PAGES_PER_SECTION)))
 		return -EINVAL;
 
-	lru_cache_disable();
 	mem_hotplug_begin();
 
 	/*
@@ -1563,6 +1562,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 			}
 
 			cond_resched();
+			lru_add_drain_all();
 
 			ret = scan_movable_pages(pfn, end_pfn, &pfn);
 			if (!ret) {
@@ -1647,8 +1647,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 	memory_notify(MEM_OFFLINE, &arg);
 	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
 	mem_hotplug_done();
-
-	lru_cache_enable();
 	return 0;
 
 failed_removal_isolated:
@@ -1661,7 +1659,6 @@ failed_removal:
 		 reason);
 	/* pushback to free area */
 	mem_hotplug_done();
-	lru_cache_enable();
 	return ret;
 }
 

mm/mempolicy.c

@@ -1326,7 +1326,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 
-		lru_cache_disable();
+		lru_add_drain_all();
 	}
 	{
 		NODEMASK_SCRATCH(scratch);
@@ -1374,8 +1374,6 @@ up_out:
 	mmap_write_unlock(mm);
 mpol_out:
 	mpol_put(new);
-	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-		lru_cache_enable();
 	return err;
 }
 

mm/migrate.c

@@ -1697,7 +1697,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 	int start, i;
 	int err = 0, err1;
 
-	lru_cache_disable();
+	lru_add_drain_all();
 
 	for (i = start = 0; i < nr_pages; i++) {
 		const void __user *p;
@@ -1766,7 +1766,6 @@ out_flush:
 	if (err >= 0)
 		err = err1;
 out:
-	lru_cache_enable();
 	return err;
 }
 

mm/swap.c

@@ -257,18 +257,6 @@ static void pagevec_move_tail(struct pagevec *pvec)
 	__count_vm_events(PGROTATED, pgmoved);
 }
 
-/* return true if pagevec needs to drain */
-static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
-{
-	bool ret = false;
-
-	if (!pagevec_add(pvec, page) || PageCompound(page) ||
-			lru_cache_disabled())
-		ret = true;
-
-	return ret;
-}
-
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim. If it still appears to be reclaimable, move it to the tail of the
@@ -284,7 +272,7 @@ void rotate_reclaimable_page(struct page *page)
 		get_page(page);
 		local_lock_irqsave(&lru_rotate.lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate.pvec);
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
 		local_unlock_irqrestore(&lru_rotate.lock, flags);
 	}
@@ -369,7 +357,7 @@ static void activate_page(struct page *page)
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
 		get_page(page);
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -481,7 +469,7 @@ void lru_cache_add(struct page *page)
 	get_page(page);
 	local_lock(&lru_pvecs.lock);
 	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
-	if (pagevec_add_and_need_flush(pvec, page))
+	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
 	local_unlock(&lru_pvecs.lock);
 }
@@ -690,7 +678,7 @@ void deactivate_file_page(struct page *page)
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
 
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -712,7 +700,7 @@ void deactivate_page(struct page *page)
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
 		get_page(page);
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -734,7 +722,7 @@ void mark_page_lazyfree(struct page *page)
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
 		get_page(page);
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -765,13 +753,18 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 }
 
 /*
+ * lru_add_drain_all() usually needs to be called before we start compiling
+ * a list of pages to be migrated using isolate_lru_page(). Note that pages
+ * may be moved off the LRU after we have drained them. Those pages will
+ * fail to migrate like other pages that may be busy.
+ *
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
  * executed on the offlined cpu.
  * Calling this function with cpu hotplug locks held can actually lead
  * to obscure indirect dependencies via WQ context.
  */
-static void __lru_add_drain_all(bool force_all_cpus)
+void lru_add_drain_all(void)
 {
 	/*
 	 * lru_drain_gen - Global pages generation number
@@ -816,7 +809,7 @@ static void __lru_add_drain_all(bool force_all_cpus)
 	 * (C) Exit the draining operation if a newer generation, from another
 	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
 	 */
-	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
+	if (unlikely(this_gen != lru_drain_gen))
 		goto done;
 
 	/*
@@ -846,8 +839,7 @@ static void __lru_add_drain_all(bool force_all_cpus)
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-		if (force_all_cpus ||
-		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
 		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
@@ -865,11 +857,6 @@ static void __lru_add_drain_all(bool force_all_cpus)
 done:
 	mutex_unlock(&lock);
 }
-
-void lru_add_drain_all(void)
-{
-	__lru_add_drain_all(false);
-}
 #else
 void lru_add_drain_all(void)
 {
@@ -877,44 +864,6 @@ void lru_add_drain_all(void)
 }
 #endif /* CONFIG_SMP */
 
-static atomic_t lru_disable_count = ATOMIC_INIT(0);
-
-bool lru_cache_disabled(void)
-{
-	return atomic_read(&lru_disable_count);
-}
-
-void lru_cache_enable(void)
-{
-	atomic_dec(&lru_disable_count);
-}
-
-/*
- * lru_cache_disable() needs to be called before we start compiling
- * a list of pages to be migrated using isolate_lru_page().
- * It drains pages on LRU cache and then disable on all cpus until
- * lru_cache_enable is called.
- *
- * Must be paired with a call to lru_cache_enable().
- */
-void lru_cache_disable(void)
-{
-	atomic_inc(&lru_disable_count);
-#ifdef CONFIG_SMP
-	/*
-	 * lru_add_drain_all in the force mode will schedule draining on
-	 * all online CPUs so any calls of lru_cache_disabled wrapped by
-	 * local_lock or preemption disabled would be ordered by that.
-	 * The atomic operation doesn't need to have stronger ordering
-	 * requirements because that is enforeced by the scheduling
-	 * guarantees.
-	 */
-	__lru_add_drain_all(true);
-#else
-	lru_add_drain();
-#endif
-}
-
 /**
  * release_pages - batched put_page()
  * @pages: array of pages to release