Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
 "A few little subsystems and a start of a lot of MM patches.

  Subsystems affected by this patch series: squashfs, ocfs2, parisc,
  vfs. With mm subsystems: slab-generic, slub, debug, pagecache, gup,
  swap, memcg, pagemap, memory-failure, vmalloc, kasan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (128 commits)
  kasan: move kasan_report() into report.c
  mm/mm_init.c: report kasan-tag information stored in page->flags
  ubsan: entirely disable alignment checks under UBSAN_TRAP
  kasan: fix clang compilation warning due to stack protector
  x86/mm: remove vmalloc faulting
  mm: remove vmalloc_sync_(un)mappings()
  x86/mm/32: implement arch_sync_kernel_mappings()
  x86/mm/64: implement arch_sync_kernel_mappings()
  mm/ioremap: track which page-table levels were modified
  mm/vmalloc: track which page-table levels were modified
  mm: add functions to track page directory modifications
  s390: use __vmalloc_node in stack_alloc
  powerpc: use __vmalloc_node in alloc_vm_stack
  arm64: use __vmalloc_node in arch_alloc_vmap_stack
  mm: remove vmalloc_user_node_flags
  mm: switch the test_vmalloc module to use __vmalloc_node
  mm: remove __vmalloc_node_flags_caller
  mm: remove both instances of __vmalloc_node_flags
  mm: remove the prot argument to __vmalloc_node
  mm: remove the pgprot argument to __vmalloc
  ...
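Several entries above rework the vmalloc API surface. As an illustration only (not part of the diff below), after "mm: remove the pgprot argument to __vmalloc" a caller that used to pass PAGE_KERNEL explicitly simply drops that argument; the alloc_scratch() wrapper in this sketch is hypothetical.

#include <linux/vmalloc.h>
#include <linux/gfp.h>

/* Hypothetical caller sketching the post-series API: the pgprot argument
 * is gone because every remaining user passed PAGE_KERNEL anyway. */
static void *alloc_scratch(unsigned long size)
{
	/* pre-series form: __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); */
	return __vmalloc(size, GFP_KERNEL);
}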
fs/proc/meminfo.c
@@ -110,8 +110,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "PageTables: ",
 		    global_zone_page_state(NR_PAGETABLE));
-	show_val_kb(m, "NFS_Unstable: ",
-		    global_node_page_state(NR_UNSTABLE_NFS));
+	show_val_kb(m, "NFS_Unstable: ", 0);
 	show_val_kb(m, "Bounce: ",
 		    global_zone_page_state(NR_BOUNCE));
 	show_val_kb(m, "WritebackTmp: ",
 		    global_node_page_state(NR_WRITEBACK_TEMP));
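The NFS_Unstable field stays in /proc/meminfo so existing parsers keep working, but it is now hard-wired to 0; the underlying pages are accounted as Writeback instead. A minimal userspace sketch (illustrative, not from the kernel tree) of what a consumer now sees:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* After this change the value reported here is always 0 kB;
		 * the line is retained only for ABI compatibility. */
		if (!strncmp(line, "NFS_Unstable:", 13))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}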
fs/proc/task_mmu.c
@@ -546,10 +546,17 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
-	struct page *page;
+	struct page *page = NULL;
 
-	/* FOLL_DUMP will return -EFAULT on huge zero page */
-	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+	if (pmd_present(*pmd)) {
+		/* FOLL_DUMP will return -EFAULT on huge zero page */
+		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
+		swp_entry_t entry = pmd_to_swp_entry(*pmd);
+
+		if (is_migration_entry(entry))
+			page = migration_entry_to_page(entry);
+	}
 	if (IS_ERR_OR_NULL(page))
 		return;
 	if (PageAnon(page))
@@ -578,8 +585,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
-		if (pmd_present(*pmd))
-			smaps_pmd_entry(pmd, addr, walk);
+		smaps_pmd_entry(pmd, addr, walk);
 		spin_unlock(ptl);
 		goto out;
 	}
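The two hunks above teach the smaps code about PMD migration entries: while a transparent huge page is being migrated, its PMD is temporarily non-present and encodes a swap-style migration entry, so smaps_pmd_entry() must not hand it to follow_trans_huge_pmd() unconditionally, and the pmd_present() check in smaps_pte_range() becomes redundant once the helper distinguishes the cases itself. A rough standalone restatement of the pattern, using a hypothetical helper name and assuming the caller holds the PMD lock from pmd_trans_huge_lock(), is sketched below:

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/swapops.h>
#include <linux/err.h>

/* Hypothetical helper restating the pattern above: resolve the page behind
 * a PMD that is either present (a mapped THP) or a migration entry. */
static struct page *pmd_to_mapped_page(struct vm_area_struct *vma,
				       unsigned long addr, pmd_t *pmd)
{
	struct page *page = NULL;

	if (pmd_present(*pmd)) {
		/* FOLL_DUMP turns the huge zero page into an -EFAULT result */
		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	} else if (thp_migration_supported() && is_swap_pmd(*pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	}
	return IS_ERR_OR_NULL(page) ? NULL : page;
}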