mm: soft-dirty: keep soft-dirty bits over thp migration
The soft-dirty bit is designed to be preserved across page migration. This patch makes it work in the same manner for THP migration too.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by: Linus Torvalds
parent: 84c3fc4e9c
commit: ab6e3d0939
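For context (not part of the patch): soft-dirty tracking is driven from userspace through procfs. Writing "4" to /proc/<pid>/clear_refs clears the soft-dirty state for the whole process, and bit 55 of each 64-bit /proc/<pid>/pagemap entry then reports whether the corresponding page has been written since. A minimal sketch of that interface, with error handling omitted:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psize = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, psize, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        uint64_t ent = 0;
        int fd;

        /* Clear soft-dirty state for the whole process. */
        fd = open("/proc/self/clear_refs", O_WRONLY);
        write(fd, "4", 1);
        close(fd);

        p[0] = 1;        /* the write marks the page soft-dirty again */

        /* Each pagemap entry is 64 bits; bit 55 is the soft-dirty bit. */
        fd = open("/proc/self/pagemap", O_RDONLY);
        pread(fd, &ent, sizeof(ent), ((uintptr_t)p / psize) * sizeof(ent));
        close(fd);
        printf("soft-dirty: %d\n", (int)((ent >> 55) & 1));
        return 0;
}

The hunks below make sure this bit also survives when the page behind such an entry is a transparent huge page whose pmd is temporarily replaced by a migration entry.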
@@ -937,6 +937,8 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		if (is_write_migration_entry(entry)) {
 			make_migration_entry_read(&entry);
 			pmd = swp_entry_to_pmd(entry);
+			if (pmd_swp_soft_dirty(*src_pmd))
+				pmd = pmd_swp_mksoft_dirty(pmd);
 			set_pmd_at(src_mm, addr, src_pmd, pmd);
 		}
 		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
@@ -1756,6 +1758,17 @@ static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
 }
 #endif
 
+static pmd_t move_soft_dirty_pmd(pmd_t pmd)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	if (unlikely(is_pmd_migration_entry(pmd)))
+		pmd = pmd_swp_mksoft_dirty(pmd);
+	else if (pmd_present(pmd))
+		pmd = pmd_mksoft_dirty(pmd);
+#endif
+	return pmd;
+}
+
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
 		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
@@ -1798,7 +1811,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 		}
-		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+		pmd = move_soft_dirty_pmd(pmd);
+		set_pmd_at(mm, new_addr, new_pmd, pmd);
 		if (new_ptl != old_ptl)
 			spin_unlock(new_ptl);
 		if (force_flush)
@@ -1846,6 +1860,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 */
 			make_migration_entry_read(&entry);
 			newpmd = swp_entry_to_pmd(entry);
+			if (pmd_swp_soft_dirty(*pmd))
+				newpmd = pmd_swp_mksoft_dirty(newpmd);
 			set_pmd_at(mm, addr, pmd, newpmd);
 		}
 		goto unlock;
@@ -2824,6 +2840,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	unsigned long address = pvmw->address;
 	pmd_t pmdval;
 	swp_entry_t entry;
+	pmd_t pmdswp;
 
 	if (!(pvmw->pmd && !pvmw->pte))
 		return;
@@ -2837,8 +2854,10 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	if (pmd_dirty(pmdval))
 		set_page_dirty(page);
 	entry = make_migration_entry(page, pmd_write(pmdval));
-	pmdval = swp_entry_to_pmd(entry);
-	set_pmd_at(mm, address, pvmw->pmd, pmdval);
+	pmdswp = swp_entry_to_pmd(entry);
+	if (pmd_soft_dirty(pmdval))
+		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
+	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
 	page_remove_rmap(page, true);
 	put_page(page);
 
@@ -2861,6 +2880,8 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 	entry = pmd_to_swp_entry(*pvmw->pmd);
 	get_page(new);
 	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
+	if (pmd_swp_soft_dirty(*pvmw->pmd))
+		pmde = pmd_mksoft_dirty(pmde);
 	if (is_write_migration_entry(entry))
 		pmde = maybe_pmd_mkwrite(pmde, vma);
 
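As a hedged illustration of the behavior this patch enables (not part of the series): one plausible way to exercise the path is to dirty a THP-backed range after clearing soft-dirty state, ask the kernel to migrate the huge page to another NUMA node with move_pages(2), and then re-read the pagemap entry. The sketch below assumes libnuma (link with -lnuma), at least two NUMA nodes (node 1 is a placeholder), and a kernel with this series; THP allocation is not guaranteed and error handling is omitted.

#include <fcntl.h>
#include <numaif.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE (2UL << 20)

static int soft_dirty(void *addr)
{
        uint64_t ent = 0;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        pread(fd, &ent, sizeof(ent),
              ((uintptr_t)addr / sysconf(_SC_PAGESIZE)) * sizeof(ent));
        close(fd);
        return (ent >> 55) & 1;        /* bit 55: soft-dirty */
}

int main(void)
{
        /* Over-map and align so the range can be backed by one 2MB THP. */
        char *map = mmap(NULL, 2 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char *p = (char *)(((uintptr_t)map + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));
        void *pages[1] = { p };
        int nodes[1] = { 1 };        /* assumed second NUMA node */
        int status[1];
        int fd;

        madvise(p, HPAGE_SIZE, MADV_HUGEPAGE);

        /* Reset soft-dirty state, then dirty the huge page. */
        fd = open("/proc/self/clear_refs", O_WRONLY);
        write(fd, "4", 1);
        close(fd);
        memset(p, 1, HPAGE_SIZE);

        printf("before migration: %d\n", soft_dirty(p));

        /* With THP migration the huge page can be moved without splitting. */
        move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);

        /* With this patch the soft-dirty bit survives the migration. */
        printf("after migration:  %d\n", soft_dirty(p));
        return 0;
}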