mm/migrate: support un-addressable ZONE_DEVICE page in migration
Allow unmapping and restoring of the special swap entries used for un-addressable ZONE_DEVICE memory.

Link: http://lkml.kernel.org/r/20170817000548.32038-17-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Sherry Cheung <SCheung@nvidia.com>
Cc: Subhash Gutti <sgutti@nvidia.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Bob Liu <liubo95@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a5430dda8a
parent 8c3328f1f3
committed by Linus Torvalds
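For readers outside the mm code: an un-addressable device-private page is kept in the page table as a special, non-present swap pte, so a page-table walker has to decode that entry before comparing it against the page it is looking for. Below is a minimal sketch of that check, using the helpers this series relies on (is_swap_pte(), pte_to_swp_entry(), is_device_private_entry(), device_private_entry_to_page()); the wrapper function itself is hypothetical and is not part of the patch.

#include <linux/mm.h>
#include <linux/swapops.h>

/*
 * Sketch only: does this (non-present) pte encode the device-private
 * swap entry standing in for @page? The helpers are the ones used by
 * this series; the wrapper is illustrative.
 */
static bool pte_maps_device_private(pte_t pte, struct page *page)
{
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);
	if (!is_device_private_entry(entry))
		return false;

	/* The swap entry records which ZONE_DEVICE page it replaces. */
	return device_private_entry_to_page(entry) == page;
}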
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -48,6 +48,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 		if (!is_swap_pte(*pvmw->pte))
 			return false;
 		entry = pte_to_swp_entry(*pvmw->pte);
+
 		if (!is_migration_entry(entry))
 			return false;
 		if (migration_entry_to_page(entry) - pvmw->page >=
@@ -60,6 +61,15 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 		WARN_ON_ONCE(1);
 #endif
 	} else {
+		if (is_swap_pte(*pvmw->pte)) {
+			swp_entry_t entry;
+
+			entry = pte_to_swp_entry(*pvmw->pte);
+			if (is_device_private_entry(entry) &&
+			    device_private_entry_to_page(entry) == pvmw->page)
+				return true;
+		}
+
 		if (!pte_present(*pvmw->pte))
 			return false;
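The hunk above covers the lookup direction: the rmap walk now accepts a device-private swap entry that points back at pvmw->page as a valid mapping of that page. The restore direction mentioned in the commit message goes the other way and rebuilds such an entry when the page is mapped again. A hedged sketch of that step, assuming the make_device_private_entry() and swp_entry_to_pte() helpers from this series; the wrapper below is hypothetical and ignores pte bits (such as soft-dirty) that the real restore code has to carry over.

#include <linux/mm.h>
#include <linux/swapops.h>

/*
 * Sketch only: build the swap pte used to map an un-addressable
 * device-private page. The real restore path handles more state;
 * this wrapper is illustrative.
 */
static pte_t device_private_pte(struct page *page, bool writable)
{
	swp_entry_t entry;

	/* Encode the ZONE_DEVICE page as a device-private swap entry... */
	entry = make_device_private_entry(page, writable);

	/* ...and turn it into a pte that is_swap_pte() will match. */
	return swp_entry_to_pte(entry);
}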