BACKPORT: FROMLIST: mm: disable LRU pagevec during the migration temporarily
LRU pagevec holds refcount of pages until the pagevec is drained. It could prevent migration since the refcount of the page is greater than the expectation in migration logic. To mitigate the issue, callers of migrate_pages drain LRU pagevec via migrate_prep or lru_add_drain_all before the migrate_pages call. However, it's not enough because pages coming into pagevec after the draining call still could stay at the pagevec, so it could keep preventing page migration. Since some callers of migrate_pages have retrial logic with LRU draining, the page would migrate at the next trial, but it is still fragile in that it doesn't close the fundamental race between upcoming LRU pages into pagevec and migration, so the migration failure could cause contiguous memory allocation failure in the end. To close the race, this patch disables LRU caches (i.e., pagevec) during ongoing migration until migration is done. Since it's really hard to reproduce, I measured how many times migrate_pages retried with force mode (it is about a fallback to a sync migration) with the debug code below. int migrate_pages(struct list_head *from, new_page_t get_new_page, .. .. if (rc && reason == MR_CONTIG_RANGE && pass > 2) { printk(KERN_ERR, "pfn 0x%lx reason %d\n", page_to_pfn(page), rc); dump_page(page, "fail to migrate"); } The test was repeatedly launching Android apps with CMA allocation in the background every five seconds. Total CMA allocation count was about 500 during the testing. With this patch, the dump_page count was reduced from 400 to 30. The new interface is also useful for memory hotplug, which currently drains LRU pcp caches after each migration failure. This is rather suboptimal as it has to disrupt others running during the operation. With the new interface the operation happens only once. This is also in line with the pcp allocator caches, which are disabled for the offlining as well. 
Bug: 180018981 Link: https://lore.kernel.org/linux-mm/20210310161429.399432-2-minchan@kernel.org/ [minchan: Resolved conflict in mm/memory_hotplug.c, mm/swap.c] Signed-off-by: Minchan Kim <minchan@kernel.org> Signed-off-by: Minchan Kim <minchan@google.com> Change-Id: Ie1e09cf26e3105b674a9aed4ac65070efee608af
This commit is contained in:
@@ -341,6 +341,9 @@ extern void lru_cache_add(struct page *);
|
|||||||
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
|
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
|
||||||
struct lruvec *lruvec, struct list_head *head);
|
struct lruvec *lruvec, struct list_head *head);
|
||||||
extern void mark_page_accessed(struct page *);
|
extern void mark_page_accessed(struct page *);
|
||||||
|
extern void lru_cache_disable(void);
|
||||||
|
extern void lru_cache_enable(void);
|
||||||
|
extern bool lru_cache_disabled(void);
|
||||||
extern void lru_add_drain(void);
|
extern void lru_add_drain(void);
|
||||||
extern void lru_add_drain_cpu(int cpu);
|
extern void lru_add_drain_cpu(int cpu);
|
||||||
extern void lru_add_drain_cpu_zone(struct zone *zone);
|
extern void lru_add_drain_cpu_zone(struct zone *zone);
|
||||||
|
@@ -1504,6 +1504,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
|
|||||||
!IS_ALIGNED(start_pfn | nr_pages, PAGES_PER_SECTION)))
|
!IS_ALIGNED(start_pfn | nr_pages, PAGES_PER_SECTION)))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
lru_cache_disable();
|
||||||
mem_hotplug_begin();
|
mem_hotplug_begin();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -1562,7 +1563,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
|
|||||||
}
|
}
|
||||||
|
|
||||||
cond_resched();
|
cond_resched();
|
||||||
lru_add_drain_all();
|
|
||||||
|
|
||||||
ret = scan_movable_pages(pfn, end_pfn, &pfn);
|
ret = scan_movable_pages(pfn, end_pfn, &pfn);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
@@ -1647,6 +1647,8 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
|
|||||||
memory_notify(MEM_OFFLINE, &arg);
|
memory_notify(MEM_OFFLINE, &arg);
|
||||||
remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
|
remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
|
||||||
mem_hotplug_done();
|
mem_hotplug_done();
|
||||||
|
lru_cache_enable();
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
failed_removal_isolated:
|
failed_removal_isolated:
|
||||||
@@ -1659,6 +1661,7 @@ failed_removal:
|
|||||||
reason);
|
reason);
|
||||||
/* pushback to free area */
|
/* pushback to free area */
|
||||||
mem_hotplug_done();
|
mem_hotplug_done();
|
||||||
|
lru_cache_enable();
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1326,7 +1326,7 @@ static long do_mbind(unsigned long start, unsigned long len,
|
|||||||
|
|
||||||
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
|
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
|
||||||
|
|
||||||
lru_add_drain_all();
|
lru_cache_disable();
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
NODEMASK_SCRATCH(scratch);
|
NODEMASK_SCRATCH(scratch);
|
||||||
@@ -1374,6 +1374,8 @@ up_out:
|
|||||||
mmap_write_unlock(mm);
|
mmap_write_unlock(mm);
|
||||||
mpol_out:
|
mpol_out:
|
||||||
mpol_put(new);
|
mpol_put(new);
|
||||||
|
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
|
||||||
|
lru_cache_enable();
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1697,7 +1697,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
|
|||||||
int start, i;
|
int start, i;
|
||||||
int err = 0, err1;
|
int err = 0, err1;
|
||||||
|
|
||||||
lru_add_drain_all();
|
lru_cache_disable();
|
||||||
|
|
||||||
for (i = start = 0; i < nr_pages; i++) {
|
for (i = start = 0; i < nr_pages; i++) {
|
||||||
const void __user *p;
|
const void __user *p;
|
||||||
@@ -1766,6 +1766,7 @@ out_flush:
|
|||||||
if (err >= 0)
|
if (err >= 0)
|
||||||
err = err1;
|
err = err1;
|
||||||
out:
|
out:
|
||||||
|
lru_cache_enable();
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
79
mm/swap.c
79
mm/swap.c
@@ -257,6 +257,18 @@ static void pagevec_move_tail(struct pagevec *pvec)
|
|||||||
__count_vm_events(PGROTATED, pgmoved);
|
__count_vm_events(PGROTATED, pgmoved);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* return true if pagevec needs to drain */
|
||||||
|
static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
|
||||||
|
{
|
||||||
|
bool ret = false;
|
||||||
|
|
||||||
|
if (!pagevec_add(pvec, page) || PageCompound(page) ||
|
||||||
|
lru_cache_disabled())
|
||||||
|
ret = true;
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Writeback is about to end against a page which has been marked for immediate
|
* Writeback is about to end against a page which has been marked for immediate
|
||||||
* reclaim. If it still appears to be reclaimable, move it to the tail of the
|
* reclaim. If it still appears to be reclaimable, move it to the tail of the
|
||||||
@@ -272,7 +284,7 @@ void rotate_reclaimable_page(struct page *page)
|
|||||||
get_page(page);
|
get_page(page);
|
||||||
local_lock_irqsave(&lru_rotate.lock, flags);
|
local_lock_irqsave(&lru_rotate.lock, flags);
|
||||||
pvec = this_cpu_ptr(&lru_rotate.pvec);
|
pvec = this_cpu_ptr(&lru_rotate.pvec);
|
||||||
if (!pagevec_add(pvec, page) || PageCompound(page))
|
if (pagevec_add_and_need_flush(pvec, page))
|
||||||
pagevec_move_tail(pvec);
|
pagevec_move_tail(pvec);
|
||||||
local_unlock_irqrestore(&lru_rotate.lock, flags);
|
local_unlock_irqrestore(&lru_rotate.lock, flags);
|
||||||
}
|
}
|
||||||
@@ -357,7 +369,7 @@ static void activate_page(struct page *page)
|
|||||||
local_lock(&lru_pvecs.lock);
|
local_lock(&lru_pvecs.lock);
|
||||||
pvec = this_cpu_ptr(&lru_pvecs.activate_page);
|
pvec = this_cpu_ptr(&lru_pvecs.activate_page);
|
||||||
get_page(page);
|
get_page(page);
|
||||||
if (!pagevec_add(pvec, page) || PageCompound(page))
|
if (pagevec_add_and_need_flush(pvec, page))
|
||||||
pagevec_lru_move_fn(pvec, __activate_page, NULL);
|
pagevec_lru_move_fn(pvec, __activate_page, NULL);
|
||||||
local_unlock(&lru_pvecs.lock);
|
local_unlock(&lru_pvecs.lock);
|
||||||
}
|
}
|
||||||
@@ -469,7 +481,7 @@ void lru_cache_add(struct page *page)
|
|||||||
get_page(page);
|
get_page(page);
|
||||||
local_lock(&lru_pvecs.lock);
|
local_lock(&lru_pvecs.lock);
|
||||||
pvec = this_cpu_ptr(&lru_pvecs.lru_add);
|
pvec = this_cpu_ptr(&lru_pvecs.lru_add);
|
||||||
if (!pagevec_add(pvec, page) || PageCompound(page))
|
if (pagevec_add_and_need_flush(pvec, page))
|
||||||
__pagevec_lru_add(pvec);
|
__pagevec_lru_add(pvec);
|
||||||
local_unlock(&lru_pvecs.lock);
|
local_unlock(&lru_pvecs.lock);
|
||||||
}
|
}
|
||||||
@@ -678,7 +690,7 @@ void deactivate_file_page(struct page *page)
|
|||||||
local_lock(&lru_pvecs.lock);
|
local_lock(&lru_pvecs.lock);
|
||||||
pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
|
pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
|
||||||
|
|
||||||
if (!pagevec_add(pvec, page) || PageCompound(page))
|
if (pagevec_add_and_need_flush(pvec, page))
|
||||||
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
|
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
|
||||||
local_unlock(&lru_pvecs.lock);
|
local_unlock(&lru_pvecs.lock);
|
||||||
}
|
}
|
||||||
@@ -700,7 +712,7 @@ void deactivate_page(struct page *page)
|
|||||||
local_lock(&lru_pvecs.lock);
|
local_lock(&lru_pvecs.lock);
|
||||||
pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
|
pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
|
||||||
get_page(page);
|
get_page(page);
|
||||||
if (!pagevec_add(pvec, page) || PageCompound(page))
|
if (pagevec_add_and_need_flush(pvec, page))
|
||||||
pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
|
pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
|
||||||
local_unlock(&lru_pvecs.lock);
|
local_unlock(&lru_pvecs.lock);
|
||||||
}
|
}
|
||||||
@@ -722,7 +734,7 @@ void mark_page_lazyfree(struct page *page)
|
|||||||
local_lock(&lru_pvecs.lock);
|
local_lock(&lru_pvecs.lock);
|
||||||
pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
|
pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
|
||||||
get_page(page);
|
get_page(page);
|
||||||
if (!pagevec_add(pvec, page) || PageCompound(page))
|
if (pagevec_add_and_need_flush(pvec, page))
|
||||||
pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
|
pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
|
||||||
local_unlock(&lru_pvecs.lock);
|
local_unlock(&lru_pvecs.lock);
|
||||||
}
|
}
|
||||||
@@ -753,18 +765,13 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* lru_add_drain_all() usually needs to be called before we start compiling
|
|
||||||
* a list of pages to be migrated using isolate_lru_page(). Note that pages
|
|
||||||
* may be moved off the LRU after we have drained them. Those pages will
|
|
||||||
* fail to migrate like other pages that may be busy.
|
|
||||||
*
|
|
||||||
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
|
* Doesn't need any cpu hotplug locking because we do rely on per-cpu
|
||||||
* kworkers being shut down before our page_alloc_cpu_dead callback is
|
* kworkers being shut down before our page_alloc_cpu_dead callback is
|
||||||
* executed on the offlined cpu.
|
* executed on the offlined cpu.
|
||||||
* Calling this function with cpu hotplug locks held can actually lead
|
* Calling this function with cpu hotplug locks held can actually lead
|
||||||
* to obscure indirect dependencies via WQ context.
|
* to obscure indirect dependencies via WQ context.
|
||||||
*/
|
*/
|
||||||
void lru_add_drain_all(void)
|
static void __lru_add_drain_all(bool force_all_cpus)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
* lru_drain_gen - Global pages generation number
|
* lru_drain_gen - Global pages generation number
|
||||||
@@ -809,7 +816,7 @@ void lru_add_drain_all(void)
|
|||||||
* (C) Exit the draining operation if a newer generation, from another
|
* (C) Exit the draining operation if a newer generation, from another
|
||||||
* lru_add_drain_all(), was already scheduled for draining. Check (A).
|
* lru_add_drain_all(), was already scheduled for draining. Check (A).
|
||||||
*/
|
*/
|
||||||
if (unlikely(this_gen != lru_drain_gen))
|
if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
|
||||||
goto done;
|
goto done;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -839,7 +846,8 @@ void lru_add_drain_all(void)
|
|||||||
for_each_online_cpu(cpu) {
|
for_each_online_cpu(cpu) {
|
||||||
struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
|
struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
|
||||||
|
|
||||||
if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
|
if (force_all_cpus ||
|
||||||
|
pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
|
||||||
data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
|
data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
|
||||||
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
|
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
|
||||||
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
|
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
|
||||||
@@ -857,6 +865,11 @@ void lru_add_drain_all(void)
|
|||||||
done:
|
done:
|
||||||
mutex_unlock(&lock);
|
mutex_unlock(&lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void lru_add_drain_all(void)
|
||||||
|
{
|
||||||
|
__lru_add_drain_all(false);
|
||||||
|
}
|
||||||
#else
|
#else
|
||||||
void lru_add_drain_all(void)
|
void lru_add_drain_all(void)
|
||||||
{
|
{
|
||||||
@@ -864,6 +877,44 @@ void lru_add_drain_all(void)
|
|||||||
}
|
}
|
||||||
#endif /* CONFIG_SMP */
|
#endif /* CONFIG_SMP */
|
||||||
|
|
||||||
|
static atomic_t lru_disable_count = ATOMIC_INIT(0);
|
||||||
|
|
||||||
|
bool lru_cache_disabled(void)
|
||||||
|
{
|
||||||
|
return atomic_read(&lru_disable_count);
|
||||||
|
}
|
||||||
|
|
||||||
|
void lru_cache_enable(void)
|
||||||
|
{
|
||||||
|
atomic_dec(&lru_disable_count);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* lru_cache_disable() needs to be called before we start compiling
|
||||||
|
* a list of pages to be migrated using isolate_lru_page().
|
||||||
|
* It drains pages on LRU cache and then disable on all cpus until
|
||||||
|
* lru_cache_enable is called.
|
||||||
|
*
|
||||||
|
* Must be paired with a call to lru_cache_enable().
|
||||||
|
*/
|
||||||
|
void lru_cache_disable(void)
|
||||||
|
{
|
||||||
|
atomic_inc(&lru_disable_count);
|
||||||
|
#ifdef CONFIG_SMP
|
||||||
|
/*
|
||||||
|
* lru_add_drain_all in the force mode will schedule draining on
|
||||||
|
* all online CPUs so any calls of lru_cache_disabled wrapped by
|
||||||
|
* local_lock or preemption disabled would be ordered by that.
|
||||||
|
* The atomic operation doesn't need to have stronger ordering
|
||||||
|
* requirements because that is enforced by the scheduling
|
||||||
|
* guarantees.
|
||||||
|
*/
|
||||||
|
__lru_add_drain_all(true);
|
||||||
|
#else
|
||||||
|
lru_add_drain();
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* release_pages - batched put_page()
|
* release_pages - batched put_page()
|
||||||
* @pages: array of pages to release
|
* @pages: array of pages to release
|
||||||
|
Reference in New Issue
Block a user