mm: remove rest usage of VM_NONLINEAR and pte_file()

One bit in ->vm_flags is unused now!

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

committed by Linus Torvalds
parent ac51b934f3
commit 0661a33611

mm/memory.c | 70
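
Background, for readers outside mm/: a non-present PTE used to encode either a swap-style entry or, for a VM_NONLINEAR mapping, a file offset installed by remap_file_pages(); pte_file() told the two apart. Earlier patches in this series replaced non-linear mappings with an emulation, so every non-present PTE reaching these paths now decodes as a swp_entry_t. A rough sketch of the classification that remains — the helpers are real kernel functions, but the wrapper is illustrative kernel-style pseudocode, not buildable code:

	/*
	 * Sketch only: how a non-present PTE is classified once
	 * pte_file() is gone.
	 */
	static void classify_nonpresent_pte(pte_t pte)
	{
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			/* ordinary swap slot: take a reference via swap_duplicate() */
		} else if (is_migration_entry(entry)) {
			/*
			 * page is mid-migration: copy the entry, downgrading
			 * write migration entries for COW mappings
			 */
		}
		/* no pte_file() branch left: non-linear file PTEs no longer exist */
	}
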
@@ -811,42 +811,40 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 	/* pte contains position in swap or file, so copy. */
 	if (unlikely(!pte_present(pte))) {
-		if (!pte_file(pte)) {
-			swp_entry_t entry = pte_to_swp_entry(pte);
+		swp_entry_t entry = pte_to_swp_entry(pte);
 
-			if (likely(!non_swap_entry(entry))) {
-				if (swap_duplicate(entry) < 0)
-					return entry.val;
+		if (likely(!non_swap_entry(entry))) {
+			if (swap_duplicate(entry) < 0)
+				return entry.val;
 
-				/* make sure dst_mm is on swapoff's mmlist. */
-				if (unlikely(list_empty(&dst_mm->mmlist))) {
-					spin_lock(&mmlist_lock);
-					if (list_empty(&dst_mm->mmlist))
-						list_add(&dst_mm->mmlist,
-							 &src_mm->mmlist);
-					spin_unlock(&mmlist_lock);
-				}
-				rss[MM_SWAPENTS]++;
-			} else if (is_migration_entry(entry)) {
-				page = migration_entry_to_page(entry);
+			/* make sure dst_mm is on swapoff's mmlist. */
+			if (unlikely(list_empty(&dst_mm->mmlist))) {
+				spin_lock(&mmlist_lock);
+				if (list_empty(&dst_mm->mmlist))
+					list_add(&dst_mm->mmlist,
+						 &src_mm->mmlist);
+				spin_unlock(&mmlist_lock);
+			}
+			rss[MM_SWAPENTS]++;
+		} else if (is_migration_entry(entry)) {
+			page = migration_entry_to_page(entry);
 
-				if (PageAnon(page))
-					rss[MM_ANONPAGES]++;
-				else
-					rss[MM_FILEPAGES]++;
+			if (PageAnon(page))
+				rss[MM_ANONPAGES]++;
+			else
+				rss[MM_FILEPAGES]++;
 
-				if (is_write_migration_entry(entry) &&
-				    is_cow_mapping(vm_flags)) {
-					/*
-					 * COW mappings require pages in both
-					 * parent and child to be set to read.
-					 */
-					make_migration_entry_read(&entry);
-					pte = swp_entry_to_pte(entry);
-					if (pte_swp_soft_dirty(*src_pte))
-						pte = pte_swp_mksoft_dirty(pte);
-					set_pte_at(src_mm, addr, src_pte, pte);
-				}
+			if (is_write_migration_entry(entry) &&
+			    is_cow_mapping(vm_flags)) {
+				/*
+				 * COW mappings require pages in both
+				 * parent and child to be set to read.
+				 */
+				make_migration_entry_read(&entry);
+				pte = swp_entry_to_pte(entry);
+				if (pte_swp_soft_dirty(*src_pte))
+					pte = pte_swp_mksoft_dirty(pte);
+				set_pte_at(src_mm, addr, src_pte, pte);
 			}
 		}
 		goto out_set_pte;
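
Note that nearly all of the hunk above is mechanical: with the if (!pte_file(pte)) test and its closing brace removed, the swap and migration handling simply loses one level of indentation. The only behavioural assumption is the one stated earlier — a non-present PTE here always decodes as a swp_entry_t.
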
@@ -1020,11 +1018,9 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
-			       VM_PFNMAP | VM_MIXEDMAP))) {
-		if (!vma->anon_vma)
-			return 0;
-	}
+	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
+			!vma->anon_vma)
+		return 0;
 
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
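
The second hunk folds the nested check into a single condition and drops VM_NONLINEAR from the flag mask, which is safe because nothing sets that flag any more. A minimal userspace sketch (toy flag values, not the kernel's) checking that the folded form matches the old nested form once VM_NONLINEAR is out of the picture:

	#include <assert.h>
	#include <stdbool.h>

	/* toy stand-ins for the vm_flags bits, not the kernel's values */
	#define VM_HUGETLB  0x1u
	#define VM_PFNMAP   0x2u
	#define VM_MIXEDMAP 0x4u

	static bool old_form(unsigned flags, bool has_anon_vma)
	{
		if (!(flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP))) {
			if (!has_anon_vma)
				return true;	/* "return 0": nothing to copy */
		}
		return false;
	}

	static bool new_form(unsigned flags, bool has_anon_vma)
	{
		return !(flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
			!has_anon_vma;
	}

	int main(void)
	{
		/* exhaustive over the three flag bits and anon_vma presence */
		for (unsigned flags = 0; flags < 8; flags++)
			for (int anon = 0; anon < 2; anon++)
				assert(old_form(flags, anon) == new_form(flags, anon));
		return 0;
	}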