pagewalk: separate function pointers from iterator data
The mm_walk structure currently mixed data and code. Split out the operations vectors into a new mm_walk_ops structure, and while we are changing the API also declare the mm_walk structure inside the walk_page_range and walk_page_vma functions. Based on patch from Linus Torvalds. Link: https://lore.kernel.org/r/20190828141955.22210-3-hch@lst.de Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Steven Price <steven.price@arm.com> Reviewed-by: Jason Gunthorpe <jgg@mellanox.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:

Committed by: Jason Gunthorpe
Parent commit: a520110e4a
This commit: 7b86ac3371
Changed file: mm/madvise.c (41 lines changed)
@@ -226,19 +226,9 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 	return 0;
 }
 
-static void force_swapin_readahead(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	struct mm_walk walk = {
-		.mm = vma->vm_mm,
-		.pmd_entry = swapin_walk_pmd_entry,
-		.private = vma,
-	};
-
-	walk_page_range(start, end, &walk);
-
-	lru_add_drain();	/* Push any new pages onto the LRU now */
-}
+static const struct mm_walk_ops swapin_walk_ops = {
+	.pmd_entry = swapin_walk_pmd_entry,
+};
 
 static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end,
@@ -280,7 +270,8 @@ static long madvise_willneed(struct vm_area_struct *vma,
 	*prev = vma;
 #ifdef CONFIG_SWAP
 	if (!file) {
-		force_swapin_readahead(vma, start, end);
+		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
+		lru_add_drain(); /* Push any new pages onto the LRU now */
 		return 0;
 	}
 
@@ -441,20 +432,9 @@ next:
 	return 0;
 }
 
-static void madvise_free_page_range(struct mmu_gather *tlb,
-			     struct vm_area_struct *vma,
-			     unsigned long addr, unsigned long end)
-{
-	struct mm_walk free_walk = {
-		.pmd_entry = madvise_free_pte_range,
-		.mm = vma->vm_mm,
-		.private = tlb,
-	};
-
-	tlb_start_vma(tlb, vma);
-	walk_page_range(addr, end, &free_walk);
-	tlb_end_vma(tlb, vma);
-}
+static const struct mm_walk_ops madvise_free_walk_ops = {
+	.pmd_entry = madvise_free_pte_range,
+};
 
 static int madvise_free_single_vma(struct vm_area_struct *vma,
 		unsigned long start_addr, unsigned long end_addr)
@@ -481,7 +461,10 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 	update_hiwater_rss(mm);
 
 	mmu_notifier_invalidate_range_start(&range);
-	madvise_free_page_range(&tlb, vma, range.start, range.end);
+	tlb_start_vma(&tlb, vma);
+	walk_page_range(vma->vm_mm, range.start, range.end,
+			&madvise_free_walk_ops, &tlb);
+	tlb_end_vma(&tlb, vma);
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb, range.start, range.end);
 
Reference in new issue
Block a user