FROMLIST: mm: fs: Invalidate BH LRU during page migration
Pages containing buffer_heads that are in one of the per-CPU buffer_head
LRU caches will be pinned and thus cannot be migrated. This can prevent
CMA allocations from succeeding, which are often used on platforms with
co-processors (such as a DSP) that can only use physically contiguous
memory. It can also prevent memory hot-unplugging from succeeding, which
involves migrating at least MIN_MEMORY_BLOCK_SIZE bytes of memory
(ranging from 8 MiB to 1 GiB depending on the architecture in use).

Correspondingly, invalidate the BH LRU caches before a migration starts
and stop any buffer_head from being cached in the LRU caches, until
migration has finished.

Bug: 180018981
Link: https://lore.kernel.org/linux-mm/20210310161429.399432-3-minchan@kernel.org/
Signed-off-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Minchan Kim <minchan@google.com>
Change-Id: I7ac085c2ec14a81c3c4d7b65a7eeedb0cfba4ea6
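Note: in the series this patch comes from, range-migration callers (CMA via
alloc_contig_range(), memory hot-unplug via do_migrate_range()) bracket the
migration with lru_cache_disable()/lru_cache_enable(). With this patch, that
window also drains the per-CPU BH LRUs and keeps new buffer_heads out of
them. A minimal caller-side sketch, assuming the helpers added earlier in the
series; the function and callback names here are made up for illustration:

#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/swap.h>

/* Hypothetical allocation callback, only for this sketch. */
static struct page *sketch_new_page(struct page *page, unsigned long private)
{
	return alloc_page(GFP_KERNEL);
}

/* Sketch of a CMA / hot-unplug style migration window. */
static int sketch_migrate_range(struct list_head *pagelist)
{
	int ret;

	/*
	 * Drain pagevecs and the per-CPU BH LRUs on all CPUs and keep them
	 * disabled: bh_lru_install() bails out while lru_cache_disabled()
	 * is true, so no buffer_head can re-pin a page during migration.
	 */
	lru_cache_disable();

	ret = migrate_pages(pagelist, sketch_new_page, NULL, 0,
			    MIGRATE_SYNC, MR_CONTIG_RANGE);

	/* Re-enable per-CPU caching once the range has been migrated. */
	lru_cache_enable();

	return ret;
}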
 fs/buffer.c                 | 12 ++++++++++--
 include/linux/buffer_head.h |  4 ++++
 mm/swap.c                   |  5 ++++-
 3 files changed, 18 insertions(+), 3 deletions(-)
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1265,6 +1265,14 @@ static void bh_lru_install(struct buffer_head *bh)
 	int i;
 
 	check_irqs_on();
+	/*
+	 * buffer_head in bh_lru could increase refcount of the page
+	 * until it will be invalidated. It causes page migration failure.
+	 * Skip putting upcoming bh into bh_lru until migration is done.
+	 */
+	if (lru_cache_disabled())
+		return;
+
 	bh_lru_lock();
 
 	b = this_cpu_ptr(&bh_lrus);
@@ -1410,7 +1418,7 @@ EXPORT_SYMBOL(__bread_gfp);
  * This doesn't race because it runs in each cpu either in irq
  * or with preempt disabled.
  */
-static void invalidate_bh_lru(void *arg)
+void invalidate_bh_lru(void *arg)
 {
 	struct bh_lru *b = &get_cpu_var(bh_lrus);
 	int i;
@@ -1422,7 +1430,7 @@ static void invalidate_bh_lru(void *arg)
 	put_cpu_var(bh_lrus);
 }
 
-static bool has_bh_in_lru(int cpu, void *dummy)
+bool has_bh_in_lru(int cpu, void *dummy)
 {
 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
 	int i;
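The lru_cache_disabled() check that bh_lru_install() now performs comes from
the preceding patch in this series ("mm: disable LRU pagevec during the
migration"). Roughly, it just reads a global disable count, along these lines
(a sketch of the assumed helper, not part of this patch):

#include <linux/atomic.h>
#include <linux/types.h>

/* Sketch: the real counter and helper live in mm/swap.c and
 * include/linux/swap.h in the earlier patch of the series. */
static atomic_t lru_disable_count = ATOMIC_INIT(0);

static inline bool lru_cache_disabled(void)
{
	/* Non-zero while some caller is holding the LRU caches disabled. */
	return atomic_read(&lru_disable_count) != 0;
}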
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -194,6 +194,8 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
 struct buffer_head *__bread_gfp(struct block_device *,
 				sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
+void invalidate_bh_lru(void *arg);
+bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
@@ -406,6 +408,8 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lru(void *arg) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
 #define buffer_heads_over_limit 0
 
 #endif /* CONFIG_BLOCK */
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -36,6 +36,7 @@
 #include <linux/hugetlb.h>
 #include <linux/page_idle.h>
 #include <linux/local_lock.h>
+#include <linux/buffer_head.h>
 
 #include "internal.h"
 
@@ -665,6 +666,7 @@ void lru_add_drain_cpu(int cpu)
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
 	activate_page_drain(cpu);
+	invalidate_bh_lru(NULL);
 }
 
 /**
@@ -852,7 +854,8 @@ static void __lru_add_drain_all(bool force_all_cpus)
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
-		    need_activate_page_drain(cpu)) {
+		    need_activate_page_drain(cpu) ||
+		    has_bh_in_lru(cpu, NULL)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
 			__cpumask_set_cpu(cpu, &has_work);
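For reference, the drain path these mm/swap.c hunks hook into looks roughly
like this (existing call chain, summarized as a comment; simplified):

/*
 * lru_cache_disable()
 *   __lru_add_drain_all(force_all_cpus = true)
 *     queue_work_on(cpu, mm_percpu_wq, work)   for each CPU that has
 *                                              pagevec pages queued or,
 *                                              with this patch, cached bhs
 *       lru_add_drain_per_cpu()
 *         lru_add_drain_cpu(cpu)
 *           invalidate_bh_lru(NULL)            added by this patch
 */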