mm, thp: remove infrastructure for handling splitting PMDs
With new refcounting we don't need to mark PMDs splitting.  Let's drop
code to handle this.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
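The pattern being deleted is worth spelling out. Before this series, a PMD in
the middle of a split carried a software "splitting" bit, and any walker that
found a huge PMD had to test for it and back off. A minimal sketch of that old
caller pattern, built only from the helpers this series removes
(pmd_trans_splitting() and the wait_split_huge_page() macro, both visible in
the hunks below); this is a simplified illustration, not the exact kernel code:

	/*
	 * Pre-4.5 sketch: a walker must not touch a huge PMD while a
	 * split is in flight, so it drops the pmd lock, sleeps until
	 * the split completes, and retries.
	 */
	static int walk_huge_pmd_old(struct mm_struct *mm,
				     struct vm_area_struct *vma, pmd_t *pmd)
	{
		spinlock_t *ptl;

	retry:
		ptl = pmd_lock(mm, pmd);
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(ptl);
			wait_split_huge_page(vma->anon_vma, pmd);
			goto retry;
		}
		/* PMD is stable under ptl: a huge page or a page table. */
		spin_unlock(ptl);
		return 0;
	}

With the new refcounting a PMD never appears in a transient "splitting" state,
so every one of these checks, and the wait/retry machinery behind them, can go.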

committed by Linus Torvalds
parent 1f19617d77
commit 4b471e8898
mm/memory.c | 18 ++----------------
@@ -566,7 +566,6 @@ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	spinlock_t *ptl;
 	pgtable_t new = pte_alloc_one(mm, address);
-	int wait_split_huge_page;
 	if (!new)
 		return -ENOMEM;
 
@@ -586,18 +585,14 @@ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
 	ptl = pmd_lock(mm, pmd);
-	wait_split_huge_page = 0;
 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		atomic_long_inc(&mm->nr_ptes);
 		pmd_populate(mm, pmd, new);
 		new = NULL;
-	} else if (unlikely(pmd_trans_splitting(*pmd)))
-		wait_split_huge_page = 1;
+	}
 	spin_unlock(ptl);
 	if (new)
 		pte_free(mm, new);
-	if (wait_split_huge_page)
-		wait_split_huge_page(vma->anon_vma, pmd);
 	return 0;
 }
 
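Taken together, the two hunks above leave __pte_alloc() with nothing to track:
a racing populate is still caught by the pmd_none() check under the pmd lock,
and there is no longer any split to wait for afterwards. The post-patch
function, reconstructed from the hunks (the lines between the two hunks are
elided here, since they are not shown in the diff):

	int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long address)
	{
		spinlock_t *ptl;
		pgtable_t new = pte_alloc_one(mm, address);
		if (!new)
			return -ENOMEM;

		/* ... (lines between the two hunks elided) ... */

		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
			atomic_long_inc(&mm->nr_ptes);
			pmd_populate(mm, pmd, new);
			new = NULL;
		}
		spin_unlock(ptl);
		if (new)
			pte_free(mm, new);
		return 0;
	}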
@@ -613,8 +608,7 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
 		new = NULL;
-	} else
-		VM_BUG_ON(pmd_trans_splitting(*pmd));
+	}
 	spin_unlock(&init_mm.page_table_lock);
 	if (new)
 		pte_free_kernel(&init_mm, new);
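The kernel-side allocator gets the same simplification: the VM_BUG_ON() only
asserted that a kernel PMD is never observed mid-split, and with
pmd_trans_splitting() gone the else branch has nothing left to say. The
surviving logic, reconstructed from the hunk (the spin_lock() on
init_mm.page_table_lock is assumed surrounding context, not shown in the diff):

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);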
@@ -3374,14 +3368,6 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (pmd_trans_huge(orig_pmd)) {
 			unsigned int dirty = flags & FAULT_FLAG_WRITE;
 
-			/*
-			 * If the pmd is splitting, return and retry the
-			 * the fault.  Alternative: wait until the split
-			 * is done, and goto retry.
-			 */
-			if (pmd_trans_splitting(orig_pmd))
-				return 0;
-
 			if (pmd_protnone(orig_pmd))
 				return do_huge_pmd_numa_page(mm, vma, address,
 							     orig_pmd, pmd);
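The removed comment describes the old contract: returning 0 tells the fault
core the fault was handled without error, the CPU re-executes the faulting
access, and the task keeps faulting until the split finishes. With the new
refcounting a huge PMD read here is never mid-split, so the handler proceeds
straight to the protnone/NUMA check. The post-patch block, reconstructed from
the hunk:

	if (pmd_trans_huge(orig_pmd)) {
		unsigned int dirty = flags & FAULT_FLAG_WRITE;

		if (pmd_protnone(orig_pmd))
			return do_huge_pmd_numa_page(mm, vma, address,
						     orig_pmd, pmd);
		/* ... rest of the huge-PMD fault path elided ... */
	}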