Merge 5d30bcacd9 ("Merge tag '9p-for-5.7-2' of git://github.com/martinetd/linux") into android-mainline

Baby steps on the way to 5.7-rc1

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I89095a90046a14eab189aab257a75b3dfdb5b1db
@@ -37,12 +37,16 @@
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa)
+		unsigned long cp_flags)
 {
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
 	int target_node = NUMA_NO_NODE;
+	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
+	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
+	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
+	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 
 	/*
 	 * Can be called with only the mmap_sem for reading by
@@ -98,7 +102,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				 * it cannot move them all from MIGRATE_ASYNC
 				 * context.
 				 */
-				if (page_is_file_cache(page) && PageDirty(page))
+				if (page_is_file_lru(page) && PageDirty(page))
 					continue;
 
 				/*
@@ -114,6 +118,19 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			if (preserve_write)
 				ptent = pte_mk_savedwrite(ptent);
 
+			if (uffd_wp) {
+				ptent = pte_wrprotect(ptent);
+				ptent = pte_mkuffd_wp(ptent);
+			} else if (uffd_wp_resolve) {
+				/*
+				 * Leave the write bit to be handled
+				 * by PF interrupt handler, then
+				 * things like COW could be properly
+				 * handled.
+				 */
+				ptent = pte_clear_uffd_wp(ptent);
+			}
+
 			/* Avoid taking write faults for known dirty pages */
 			if (dirty_accountable && pte_dirty(ptent) &&
 					(pte_soft_dirty(ptent) ||
@@ -122,11 +139,11 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			}
 			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
 			pages++;
-		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
+		} else if (is_swap_pte(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
+			pte_t newpte;
 
 			if (is_write_migration_entry(entry)) {
-				pte_t newpte;
 				/*
 				 * A protection check is difficult so
 				 * just be safe and disable write
@@ -135,22 +152,28 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				newpte = swp_entry_to_pte(entry);
 				if (pte_swp_soft_dirty(oldpte))
 					newpte = pte_swp_mksoft_dirty(newpte);
-				set_pte_at(vma->vm_mm, addr, pte, newpte);
-
-				pages++;
-			}
-
-			if (is_write_device_private_entry(entry)) {
-				pte_t newpte;
-
+				if (pte_swp_uffd_wp(oldpte))
+					newpte = pte_swp_mkuffd_wp(newpte);
+			} else if (is_write_device_private_entry(entry)) {
 				/*
 				 * We do not preserve soft-dirtiness. See
 				 * copy_one_pte() for explanation.
 				 */
 				make_device_private_entry_read(&entry);
 				newpte = swp_entry_to_pte(entry);
-				set_pte_at(vma->vm_mm, addr, pte, newpte);
+				if (pte_swp_uffd_wp(oldpte))
+					newpte = pte_swp_mkuffd_wp(newpte);
+			} else {
+				newpte = oldpte;
+			}
+
+			if (uffd_wp)
+				newpte = pte_swp_mkuffd_wp(newpte);
+			else if (uffd_wp_resolve)
+				newpte = pte_swp_clear_uffd_wp(newpte);
 
-				pages++;
+			if (!pte_same(oldpte, newpte)) {
+				set_pte_at(vma->vm_mm, addr, pte, newpte);
+				pages++;
 			}
 		}
@@ -188,7 +211,7 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
 
 static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		pud_t *pud, unsigned long addr, unsigned long end,
-		pgprot_t newprot, int dirty_accountable, int prot_numa)
+		pgprot_t newprot, unsigned long cp_flags)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -229,7 +252,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 				__split_huge_pmd(vma, pmd, addr, false, NULL);
 			} else {
 				int nr_ptes = change_huge_pmd(vma, pmd, addr,
-							      newprot, prot_numa);
+							      newprot, cp_flags);
 
 				if (nr_ptes) {
 					if (nr_ptes == HPAGE_PMD_NR) {
@@ -244,7 +267,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 			/* fall through, the trans huge pmd just split */
 		}
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-					      dirty_accountable, prot_numa);
+					      cp_flags);
 		pages += this_pages;
 next:
 		cond_resched();
@@ -260,7 +283,7 @@ next:
 
 static inline unsigned long change_pud_range(struct vm_area_struct *vma,
 		p4d_t *p4d, unsigned long addr, unsigned long end,
-		pgprot_t newprot, int dirty_accountable, int prot_numa)
+		pgprot_t newprot, unsigned long cp_flags)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -272,7 +295,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
 		if (pud_none_or_clear_bad(pud))
 			continue;
 		pages += change_pmd_range(vma, pud, addr, next, newprot,
-					  dirty_accountable, prot_numa);
+					  cp_flags);
 	} while (pud++, addr = next, addr != end);
 
 	return pages;
@@ -280,7 +303,7 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
 
 static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
 		pgd_t *pgd, unsigned long addr, unsigned long end,
-		pgprot_t newprot, int dirty_accountable, int prot_numa)
+		pgprot_t newprot, unsigned long cp_flags)
 {
 	p4d_t *p4d;
 	unsigned long next;
@@ -292,7 +315,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
 		if (p4d_none_or_clear_bad(p4d))
 			continue;
 		pages += change_pud_range(vma, p4d, addr, next, newprot,
-					  dirty_accountable, prot_numa);
+					  cp_flags);
 	} while (p4d++, addr = next, addr != end);
 
 	return pages;
@@ -300,7 +323,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
 
 static unsigned long change_protection_range(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa)
+		unsigned long cp_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -317,7 +340,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
 		pages += change_p4d_range(vma, pgd, addr, next, newprot,
-					  dirty_accountable, prot_numa);
+					  cp_flags);
 	} while (pgd++, addr = next, addr != end);
 
 	/* Only flush the TLB if we actually modified any entries: */
@@ -330,14 +353,17 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
 
 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
 		       unsigned long end, pgprot_t newprot,
-		       int dirty_accountable, int prot_numa)
+		       unsigned long cp_flags)
 {
 	unsigned long pages;
 
+	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
+
 	if (is_vm_hugetlb_page(vma))
 		pages = hugetlb_change_protection(vma, start, end, newprot);
 	else
-		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
+		pages = change_protection_range(vma, start, end, newprot,
+						cp_flags);
 
 	return pages;
 }
@@ -459,7 +485,7 @@ success:
 	vma_set_page_prot(vma);
 
 	change_protection(vma, start, end, vma->vm_page_prot,
-			  dirty_accountable, 0);
+			  dirty_accountable ? MM_CP_DIRTY_ACCT : 0);
 
 	/*
 	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
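Note on the cp_flags bits used throughout this diff: MM_CP_DIRTY_ACCT, MM_CP_PROT_NUMA, MM_CP_UFFD_WP, MM_CP_UFFD_WP_RESOLVE and MM_CP_UFFD_WP_ALL come from include/linux/mm.h. A sketch of those definitions, reproduced from memory for reference only (the exact bit values and comments should be checked against the tree):

/* Flags used by change_protection(); kept as a bitmap so callers can
 * combine several requests in a single argument. */
#define MM_CP_DIRTY_ACCT	(1UL << 0)	/* allow dirty bit accounting */
#define MM_CP_PROT_NUMA		(1UL << 1)	/* change is for NUMA hints */
#define MM_CP_UFFD_WP		(1UL << 2)	/* apply userfaultfd write protect */
#define MM_CP_UFFD_WP_RESOLVE	(1UL << 3)	/* resolve (remove) the write protect */
#define MM_CP_UFFD_WP_ALL	(MM_CP_UFFD_WP | MM_CP_UFFD_WP_RESOLVE)

The BUG_ON() added to change_protection() above relies on MM_CP_UFFD_WP_ALL being the OR of the two userfaultfd bits: a single call may either apply write protection (uffd_wp) or resolve it (uffd_wp_resolve), never both.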