mm: page migration avoid touching newpage until no going back
We have had trouble in the past from the way in which page migration's
newpage is initialized in dribs and drabs - see commit 8bdd638091
("mm: fix direct reclaim writeback regression") which proposed a cleanup.
We have no actual problem now, but I think the procedure would be clearer
(and alternative get_new_page pools safer to implement) if we assert that
newpage is not touched until we are sure that it's going to be used -
except for taking the trylock on it in __unmap_and_move().
So shift the early initializations from move_to_new_page() into
migrate_page_move_mapping(), mapping and NULL-mapping paths. Similarly
migrate_huge_page_move_mapping(), but its NULL-mapping path can just be
deleted: you cannot reach hugetlbfs_migrate_page() with a NULL mapping.
Adjust stages 3 to 8 in the Documentation file accordingly.
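
To make the "alternative get_new_page pools safer" point concrete: once
newpage is guaranteed untouched until the point of no return, a pool-backed
get_new_page/put_new_page pair can recycle a failed newpage without any
cleanup. A minimal sketch, not part of this patch - struct migrate_pool and
its locking are hypothetical, and the callback prototypes follow this tree's
new_page_t/free_page_t:

	#include <linux/migrate.h>	/* new_page_t, free_page_t */
	#include <linux/spinlock.h>

	/* Hypothetical preallocated-page pool; not part of this patch. */
	struct migrate_pool {
		struct list_head pages;
		spinlock_t lock;	/* initialization omitted in sketch */
	};

	static struct page *pool_get_new_page(struct page *page,
					unsigned long private, int **result)
	{
		struct migrate_pool *pool = (struct migrate_pool *)private;
		struct page *newpage = NULL;

		/* Hand out a preallocated page, chained through page->lru */
		spin_lock(&pool->lock);
		if (!list_empty(&pool->pages)) {
			newpage = list_first_entry(&pool->pages,
							struct page, lru);
			list_del(&newpage->lru);
		}
		spin_unlock(&pool->lock);
		return newpage;
	}

	static void pool_put_new_page(struct page *page, unsigned long private)
	{
		struct migrate_pool *pool = (struct migrate_pool *)private;

		/*
		 * After this patch, a newpage handed back here is still
		 * pristine - no mapping, index, memcg or SwapBacked flag
		 * set - so it can go straight back on the free list.
		 */
		spin_lock(&pool->lock);
		list_add(&page->lru, &pool->pages);
		spin_unlock(&pool->lock);
	}

Such a pair would be handed to migrate_pages() through its private argument;
before this patch, a put_new_page callback had to allow for a newpage whose
mapping, index and flags might already have been set and partially reset.
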
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

parent 470f119f01
commit cf4b769abb

mm/migrate.c | 49 lines changed
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -320,6 +320,14 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		/* Anonymous page without mapping */
 		if (page_count(page) != expected_count)
 			return -EAGAIN;
+
+		/* No turning back from here */
+		set_page_memcg(newpage, page_memcg(page));
+		newpage->index = page->index;
+		newpage->mapping = page->mapping;
+		if (PageSwapBacked(page))
+			SetPageSwapBacked(newpage);
+
 		return MIGRATEPAGE_SUCCESS;
 	}
 
@@ -355,8 +363,15 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	}
 
 	/*
-	 * Now we know that no one else is looking at the page.
+	 * Now we know that no one else is looking at the page:
+	 * no turning back from here.
 	 */
+	set_page_memcg(newpage, page_memcg(page));
+	newpage->index = page->index;
+	newpage->mapping = page->mapping;
+	if (PageSwapBacked(page))
+		SetPageSwapBacked(newpage);
+
 	get_page(newpage);	/* add cache reference */
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
@@ -403,12 +418,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 	int expected_count;
 	void **pslot;
 
-	if (!mapping) {
-		if (page_count(page) != 1)
-			return -EAGAIN;
-		return MIGRATEPAGE_SUCCESS;
-	}
-
 	spin_lock_irq(&mapping->tree_lock);
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
@@ -426,6 +435,9 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 		return -EAGAIN;
 	}
 
+	set_page_memcg(newpage, page_memcg(page));
+	newpage->index = page->index;
+	newpage->mapping = page->mapping;
 	get_page(newpage);
 
 	radix_tree_replace_slot(pslot, newpage);
@@ -730,21 +742,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
-	/* Prepare mapping for the new page.*/
-	newpage->index = page->index;
-	newpage->mapping = page->mapping;
-	if (PageSwapBacked(page))
-		SetPageSwapBacked(newpage);
-
-	/*
-	 * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
-	 * needs newpage's memcg set to transfer memcg dirty page accounting.
-	 * So perform memcg migration in two steps:
-	 * 1. set newpage->mem_cgroup (here)
-	 * 2. clear page->mem_cgroup (below)
-	 */
-	set_page_memcg(newpage, page_memcg(page));
-
 	mapping = page_mapping(page);
 	if (!mapping)
 		rc = migrate_page(mapping, newpage, page, mode);
@@ -767,9 +764,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		set_page_memcg(page, NULL);
 		if (!PageAnon(page))
 			page->mapping = NULL;
-	} else {
-		set_page_memcg(newpage, NULL);
-		newpage->mapping = NULL;
 	}
 	return rc;
 }
@@ -971,10 +965,9 @@ out:
 	 * it. Otherwise, putback_lru_page() will drop the reference grabbed
 	 * during isolation.
 	 */
-	if (put_new_page) {
-		ClearPageSwapBacked(newpage);
+	if (put_new_page)
 		put_new_page(newpage, private);
-	} else if (unlikely(__is_movable_balloon_page(newpage))) {
+	else if (unlikely(__is_movable_balloon_page(newpage))) {
 		/* drop our reference, page already in the balloon */
 		put_page(newpage);
 	} else
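
Net effect on move_to_new_page(): with the early initializations and the
failure-path resets both gone, it reduces to choosing the migratepage method
plus the success-side teardown of the old page. A condensed view of the
function after this patch, reconstructed from the hunks above with the
tree's comments elided:

	static int move_to_new_page(struct page *newpage, struct page *page,
					enum migrate_mode mode)
	{
		struct address_space *mapping;
		int rc;

		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

		mapping = page_mapping(page);
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage, page, mode);

		/* Only the old page needs teardown; newpage is already set up */
		if (rc == MIGRATEPAGE_SUCCESS) {
			set_page_memcg(page, NULL);
			if (!PageAnon(page))
				page->mapping = NULL;
		}
		return rc;
	}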