Revert "BACKPORT: FROMLIST: mm: disable LRU pagevec during the migration temporarily"
This reverts commit 3039d8580c.
Bug: 180018981
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: Iadadd7f4c7aafa439afd7a1c3e575e5e31602bef

Committed by: Suren Baghdasaryan
Parent: 9e2790466f
Commit: 7ed55d7025

mm/swap.c (79 lines changed)
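
For context, the interface removed by this revert was meant to keep pages from hiding in the per-CPU LRU pagevecs while a migration caller builds its isolation list. Below is a minimal sketch of that usage pattern, reconstructed from the comment deleted in the diff; lru_cache_disable(), lru_cache_enable() and isolate_lru_page() are the real interfaces referenced there, while build_migration_list() and do_migration() are hypothetical placeholders, not functions from this commit.

/*
 * Illustrative sketch only -- kernel-style pseudocode, not part of this
 * commit and not compilable on its own. It shows how a caller was expected
 * to pair the interface that this revert deletes.
 */
static int migrate_range_sketch(struct list_head *pagelist)
{
        int err;

        /* Drain the per-CPU pagevecs and keep them disabled from here on. */
        lru_cache_disable();

        /*
         * Compile the list of pages with isolate_lru_page(); with the LRU
         * caches disabled, newly added pages cannot sit in a pagevec and
         * dodge isolation.
         */
        err = build_migration_list(pagelist);   /* hypothetical helper */
        if (!err)
                err = do_migration(pagelist);   /* hypothetical helper */

        /* Every lru_cache_disable() must be paired with lru_cache_enable(). */
        lru_cache_enable();
        return err;
}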

diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -257,18 +257,6 @@ static void pagevec_move_tail(struct pagevec *pvec)
 	__count_vm_events(PGROTATED, pgmoved);
 }
 
-/* return true if pagevec needs to drain */
-static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
-{
-	bool ret = false;
-
-	if (!pagevec_add(pvec, page) || PageCompound(page) ||
-			lru_cache_disabled())
-		ret = true;
-
-	return ret;
-}
-
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim. If it still appears to be reclaimable, move it to the tail of the
@@ -284,7 +272,7 @@ void rotate_reclaimable_page(struct page *page)
 		get_page(page);
 		local_lock_irqsave(&lru_rotate.lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate.pvec);
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
 		local_unlock_irqrestore(&lru_rotate.lock, flags);
 	}
@@ -369,7 +357,7 @@ static void activate_page(struct page *page)
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
 		get_page(page);
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -481,7 +469,7 @@ void lru_cache_add(struct page *page)
 	get_page(page);
 	local_lock(&lru_pvecs.lock);
 	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
-	if (pagevec_add_and_need_flush(pvec, page))
+	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
 	local_unlock(&lru_pvecs.lock);
 }
@@ -690,7 +678,7 @@ void deactivate_file_page(struct page *page)
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
 
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -712,7 +700,7 @@ void deactivate_page(struct page *page)
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
 		get_page(page);
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -734,7 +722,7 @@ void mark_page_lazyfree(struct page *page)
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
 		get_page(page);
-		if (pagevec_add_and_need_flush(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 		local_unlock(&lru_pvecs.lock);
 	}
@@ -765,13 +753,18 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 }
 
 /*
+ * lru_add_drain_all() usually needs to be called before we start compiling
+ * a list of pages to be migrated using isolate_lru_page(). Note that pages
+ * may be moved off the LRU after we have drained them. Those pages will
+ * fail to migrate like other pages that may be busy.
+ *
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
  * executed on the offlined cpu.
  * Calling this function with cpu hotplug locks held can actually lead
  * to obscure indirect dependencies via WQ context.
  */
-static void __lru_add_drain_all(bool force_all_cpus)
+void lru_add_drain_all(void)
 {
 	/*
 	 * lru_drain_gen - Global pages generation number
@@ -816,7 +809,7 @@ static void __lru_add_drain_all(bool force_all_cpus)
 	 * (C) Exit the draining operation if a newer generation, from another
 	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
 	 */
-	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
+	if (unlikely(this_gen != lru_drain_gen))
 		goto done;
 
 	/*
@@ -846,8 +839,7 @@ static void __lru_add_drain_all(bool force_all_cpus)
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-		if (force_all_cpus ||
-		    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
+		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
 		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
@@ -865,11 +857,6 @@ static void __lru_add_drain_all(bool force_all_cpus)
 done:
 	mutex_unlock(&lock);
 }
-
-void lru_add_drain_all(void)
-{
-	__lru_add_drain_all(false);
-}
 #else
 void lru_add_drain_all(void)
 {
@@ -877,44 +864,6 @@ void lru_add_drain_all(void)
 }
 #endif /* CONFIG_SMP */
 
-static atomic_t lru_disable_count = ATOMIC_INIT(0);
-
-bool lru_cache_disabled(void)
-{
-	return atomic_read(&lru_disable_count);
-}
-
-void lru_cache_enable(void)
-{
-	atomic_dec(&lru_disable_count);
-}
-
-/*
- * lru_cache_disable() needs to be called before we start compiling
- * a list of pages to be migrated using isolate_lru_page().
- * It drains pages on LRU cache and then disable on all cpus until
- * lru_cache_enable is called.
- *
- * Must be paired with a call to lru_cache_enable().
- */
-void lru_cache_disable(void)
-{
-	atomic_inc(&lru_disable_count);
-#ifdef CONFIG_SMP
-	/*
-	 * lru_add_drain_all in the force mode will schedule draining on
-	 * all online CPUs so any calls of lru_cache_disabled wrapped by
-	 * local_lock or preemption disabled would be ordered by that.
-	 * The atomic operation doesn't need to have stronger ordering
-	 * requirements because that is enforeced by the scheduling
-	 * guarantees.
-	 */
-	__lru_add_drain_all(true);
-#else
-	lru_add_drain();
-#endif
-}
-
 /**
  * release_pages - batched put_page()
  * @pages: array of pages to release