mm/mmu_notifier: use structure for invalidate_range_start/end calls v2
To avoid having to change many call sites every time we want to add a
parameter, use a structure to group all parameters for the mmu_notifier
invalidate_range_start/end calls. No functional changes with this patch.

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>

From: Jérôme Glisse <jglisse@redhat.com>
Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3

Fix a build warning in migrate.c when CONFIG_MMU_NOTIFIER=n.

Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent 5d6527a784
commit ac46d4f3c4
 mm/ksm.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
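For context before the hunks: the series replaces the (mm, start, end)
argument triplet with a single range object plus an init helper. Below is a
minimal sketch of that pattern, an approximation of the definitions this
series adds in include/linux/mmu_notifier.h rather than a verbatim copy; the
real struct may carry additional fields beyond the three shown here.

	struct mm_struct;	/* forward declaration; the kernel provides this type */

	/*
	 * Sketch of the range object: grouping mm/start/end in one structure
	 * means future parameters can be added without touching every caller.
	 */
	struct mmu_notifier_range {
		struct mm_struct *mm;
		unsigned long start;
		unsigned long end;
	};

	/* Sketch of the init helper used at every call site in the diff below. */
	static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end)
	{
		range->mm = mm;
		range->start = start;
		range->end = end;
	}

Call sites then do mmu_notifier_range_init(&range, mm, start, end) followed
by mmu_notifier_invalidate_range_start(&range), as the mm/ksm.c hunks below
show.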
@@ -1042,8 +1042,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	};
 	int swapped;
 	int err = -EFAULT;
-	unsigned long mmun_start;	/* For mmu_notifiers */
-	unsigned long mmun_end;	/* For mmu_notifiers */
+	struct mmu_notifier_range range;
 
 	pvmw.address = page_address_in_vma(page, vma);
 	if (pvmw.address == -EFAULT)
@@ -1051,9 +1050,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 
 	BUG_ON(PageTransCompound(page));
 
-	mmun_start = pvmw.address;
-	mmun_end   = pvmw.address + PAGE_SIZE;
-	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+	mmu_notifier_range_init(&range, mm, pvmw.address,
+				pvmw.address + PAGE_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
 
 	if (!page_vma_mapped_walk(&pvmw))
 		goto out_mn;
@@ -1105,7 +1104,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 out_unlock:
 	page_vma_mapped_walk_done(&pvmw);
 out_mn:
-	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(&range);
 out:
 	return err;
 }
@@ -1129,8 +1128,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	spinlock_t *ptl;
 	unsigned long addr;
 	int err = -EFAULT;
-	unsigned long mmun_start;	/* For mmu_notifiers */
-	unsigned long mmun_end;	/* For mmu_notifiers */
+	struct mmu_notifier_range range;
 
 	addr = page_address_in_vma(page, vma);
 	if (addr == -EFAULT)
@@ -1140,9 +1138,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	if (!pmd)
 		goto out;
 
-	mmun_start = addr;
-	mmun_end   = addr + PAGE_SIZE;
-	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+	mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
 
 	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	if (!pte_same(*ptep, orig_pte)) {
@@ -1188,7 +1185,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	pte_unmap_unlock(ptep, ptl);
 	err = 0;
 out_mn:
-	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+	mmu_notifier_invalidate_range_end(&range);
 out:
 	return err;
 }