pagewalk: separate function pointers from iterator data
The mm_walk structure currently mixes data and code. Split out the
operations vectors into a new mm_walk_ops structure, and while we are
changing the API also declare the mm_walk structure inside the
walk_page_range and walk_page_vma functions.

Based on a patch from Linus Torvalds.

Link: https://lore.kernel.org/r/20190828141955.22210-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
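Note: the hunks below all touch the /proc/<pid> walkers in fs/proc/task_mmu.c. The following is a minimal sketch of the calling convention each site is converted to; it assumes the walker declarations live in <linux/pagewalk.h> after the earlier split in this series, and every example_* name is hypothetical. The callbacks move into a file-scope const mm_walk_ops, while the mm and the private cookie become plain arguments to walk_page_range() and walk_page_vma().

#include <linux/pagewalk.h>

/* hypothetical per-walk state, reached through walk->private */
struct example_private {
	unsigned long nr_ranges;
};

static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	struct example_private *priv = walk->private;

	priv->nr_ranges++;	/* callbacks still reach their data via walk->private */
	return 0;
}

/* code: a shared, never-mutated operations vector */
static const struct mm_walk_ops example_walk_ops = {
	.pmd_entry = example_pmd_entry,
};

static void example_walk(struct mm_struct *mm, unsigned long start,
			 unsigned long end)
{
	struct example_private priv = { 0 };

	/* data: mm and the private cookie are passed per call; the caller
	 * is expected to hold mmap_sem, as at the converted sites below.
	 */
	walk_page_range(mm, start, end, &example_walk_ops, &priv);
}

Because the ops table is const and shared, callers no longer build an on-stack struct mm_walk that mixes function pointers with per-call data, which is the pattern removed in every hunk below.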
@@ -513,7 +513,9 @@ static int smaps_pte_hole(unsigned long addr, unsigned long end,
 
 	return 0;
 }
-#endif
+#else
+#define smaps_pte_hole NULL
+#endif /* CONFIG_SHMEM */
 
 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 		struct mm_walk *walk)
@@ -729,21 +731,24 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 	}
 	return 0;
 }
+#else
+#define smaps_hugetlb_range NULL
 #endif /* HUGETLB_PAGE */
 
+static const struct mm_walk_ops smaps_walk_ops = {
+	.pmd_entry = smaps_pte_range,
+	.hugetlb_entry = smaps_hugetlb_range,
+};
+
+static const struct mm_walk_ops smaps_shmem_walk_ops = {
+	.pmd_entry = smaps_pte_range,
+	.hugetlb_entry = smaps_hugetlb_range,
+	.pte_hole = smaps_pte_hole,
+};
+
 static void smap_gather_stats(struct vm_area_struct *vma,
 		struct mem_size_stats *mss)
 {
-	struct mm_walk smaps_walk = {
-		.pmd_entry = smaps_pte_range,
-#ifdef CONFIG_HUGETLB_PAGE
-		.hugetlb_entry = smaps_hugetlb_range,
-#endif
-		.mm = vma->vm_mm,
-	};
-
-	smaps_walk.private = mss;
-
 #ifdef CONFIG_SHMEM
 	/* In case of smaps_rollup, reset the value from previous vma */
 	mss->check_shmem_swap = false;
@@ -765,12 +770,13 @@ static void smap_gather_stats(struct vm_area_struct *vma,
 			mss->swap += shmem_swapped;
 		} else {
 			mss->check_shmem_swap = true;
-			smaps_walk.pte_hole = smaps_pte_hole;
+			walk_page_vma(vma, &smaps_shmem_walk_ops, mss);
+			return;
 		}
 	}
 #endif
 	/* mmap_sem is held in m_start */
-	walk_page_vma(vma, &smaps_walk);
+	walk_page_vma(vma, &smaps_walk_ops, mss);
 }
 
 #define SEQ_PUT_DEC(str, val) \
@@ -1118,6 +1124,11 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
 	return 0;
 }
 
+static const struct mm_walk_ops clear_refs_walk_ops = {
+	.pmd_entry = clear_refs_pte_range,
+	.test_walk = clear_refs_test_walk,
+};
+
 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				size_t count, loff_t *ppos)
 {
@@ -1151,12 +1162,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		struct clear_refs_private cp = {
 			.type = type,
 		};
-		struct mm_walk clear_refs_walk = {
-			.pmd_entry = clear_refs_pte_range,
-			.test_walk = clear_refs_test_walk,
-			.mm = mm,
-			.private = &cp,
-		};
 
 		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
 			if (down_write_killable(&mm->mmap_sem)) {
@@ -1217,7 +1222,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 						0, NULL, mm, 0, -1UL);
 			mmu_notifier_invalidate_range_start(&range);
 		}
-		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
+		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
+				&cp);
 		if (type == CLEAR_REFS_SOFT_DIRTY)
 			mmu_notifier_invalidate_range_end(&range);
 		tlb_finish_mmu(&tlb, 0, -1);
@@ -1489,8 +1495,16 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
 
 	return err;
 }
+#else
+#define pagemap_hugetlb_range NULL
 #endif /* HUGETLB_PAGE */
 
+static const struct mm_walk_ops pagemap_ops = {
+	.pmd_entry = pagemap_pmd_range,
+	.pte_hole = pagemap_pte_hole,
+	.hugetlb_entry = pagemap_hugetlb_range,
+};
+
 /*
  * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
@@ -1522,7 +1536,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 {
 	struct mm_struct *mm = file->private_data;
 	struct pagemapread pm;
-	struct mm_walk pagemap_walk = {};
 	unsigned long src;
 	unsigned long svpfn;
 	unsigned long start_vaddr;
@@ -1550,14 +1563,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!pm.buffer)
 		goto out_mm;
 
-	pagemap_walk.pmd_entry = pagemap_pmd_range;
-	pagemap_walk.pte_hole = pagemap_pte_hole;
-#ifdef CONFIG_HUGETLB_PAGE
-	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
-#endif
-	pagemap_walk.mm = mm;
-	pagemap_walk.private = &pm;
-
 	src = *ppos;
 	svpfn = src / PM_ENTRY_BYTES;
 	start_vaddr = svpfn << PAGE_SHIFT;
@@ -1586,7 +1591,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 		ret = down_read_killable(&mm->mmap_sem);
 		if (ret)
 			goto out_free;
-		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
 		up_read(&mm->mmap_sem);
 		start_vaddr = end;
 
@@ -1798,6 +1803,11 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
 }
 #endif
 
+static const struct mm_walk_ops show_numa_ops = {
+	.hugetlb_entry = gather_hugetlb_stats,
+	.pmd_entry = gather_pte_stats,
+};
+
 /*
  * Display pages allocated per node and memory policy via /proc.
  */
@@ -1809,12 +1819,6 @@ static int show_numa_map(struct seq_file *m, void *v)
 	struct numa_maps *md = &numa_priv->md;
 	struct file *file = vma->vm_file;
 	struct mm_struct *mm = vma->vm_mm;
-	struct mm_walk walk = {
-		.hugetlb_entry = gather_hugetlb_stats,
-		.pmd_entry = gather_pte_stats,
-		.private = md,
-		.mm = mm,
-	};
 	struct mempolicy *pol;
 	char buffer[64];
 	int nid;
@@ -1848,7 +1852,7 @@ static int show_numa_map(struct seq_file *m, void *v)
 		seq_puts(m, " huge");
 
 	/* mmap_sem is held by m_start */
-	walk_page_vma(vma, &walk);
+	walk_page_vma(vma, &show_numa_ops, md);
 
 	if (!md->pages)
 		goto out;