Merge branch 'akpm' (patches from Andrew)
Merge first patch-bomb from Andrew Morton:

 - A few hotfixes which missed 4.4 because I was asleep; cc'ed to -stable

 - A few misc fixes

 - OCFS2 updates

 - Part of MM, including pretty large changes to page-flags handling and
   to THP management which have been buffered up for 2-3 cycles now.

   I have a lot of MM material this time.

[ It turns out the THP part wasn't quite ready, so that got dropped from
  this series  - Linus ]

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
  zsmalloc: reorganize struct size_class to pack 4 bytes hole
  mm/zbud.c: use list_last_entry() instead of list_tail_entry()
  zram/zcomp: do not zero out zcomp private pages
  zram: pass gfp from zcomp frontend to backend
  zram: try vmalloc() after kmalloc()
  zram/zcomp: use GFP_NOIO to allocate streams
  mm: add tracepoint for scanning pages
  drivers/base/memory.c: fix kernel warning during memory hotplug on ppc64
  mm/page_isolation: use macro to judge the alignment
  mm: fix noisy sparse warning in LIBCFS_ALLOC_PRE()
  mm: rework virtual memory accounting
  include/linux/memblock.h: fix ordering of 'flags' argument in comments
  mm: move lru_to_page to mm_inline.h
  Documentation/filesystems: describe the shared memory usage/accounting
  memory-hotplug: don't BUG() in register_memory_resource()
  hugetlb: make mm and fs code explicitly non-modular
  mm/swapfile.c: use list_for_each_entry_safe in free_swap_count_continuations
  mm: /proc/pid/clear_refs: no need to clear VM_SOFTDIRTY in clear_soft_dirty_pmd()
  mm: make sure isolate_lru_page() is never called for tail page
  vmstat: make vmstat_updater deferrable again and shut down on idle
  ...
mm/shmem.c (83 changed lines)
@@ -359,6 +359,87 @@ static int shmem_free_swap(struct address_space *mapping,
 	return 0;
 }
 
+/*
+ * Determine (in bytes) how many of the shmem object's pages mapped by the
+ * given offsets are swapped out.
+ *
+ * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
+ * as long as the inode doesn't go away and racy results are not a problem.
+ */
+unsigned long shmem_partial_swap_usage(struct address_space *mapping,
+						pgoff_t start, pgoff_t end)
+{
+	struct radix_tree_iter iter;
+	void **slot;
+	struct page *page;
+	unsigned long swapped = 0;
+
+	rcu_read_lock();
+
+restart:
+	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+		if (iter.index >= end)
+			break;
+
+		page = radix_tree_deref_slot(slot);
+
+		/*
+		 * This should only be possible to happen at index 0, so we
+		 * don't need to reset the counter, nor do we risk infinite
+		 * restarts.
+		 */
+		if (radix_tree_deref_retry(page))
+			goto restart;
+
+		if (radix_tree_exceptional_entry(page))
+			swapped++;
+
+		if (need_resched()) {
+			cond_resched_rcu();
+			start = iter.index + 1;
+			goto restart;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return swapped << PAGE_SHIFT;
+}
+
+/*
+ * Determine (in bytes) how many of the shmem object's pages mapped by the
+ * given vma are swapped out.
+ *
+ * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
+ * as long as the inode doesn't go away and racy results are not a problem.
+ */
+unsigned long shmem_swap_usage(struct vm_area_struct *vma)
+{
+	struct inode *inode = file_inode(vma->vm_file);
+	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct address_space *mapping = inode->i_mapping;
+	unsigned long swapped;
+
+	/* Be careful as we don't hold info->lock */
+	swapped = READ_ONCE(info->swapped);
+
+	/*
+	 * The easier cases are when the shmem object has nothing in swap, or
+	 * the vma maps it whole. Then we can simply use the stats that we
+	 * already track.
+	 */
+	if (!swapped)
+		return 0;
+
+	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
+		return swapped << PAGE_SHIFT;
+
+	/* Here comes the more involved part */
+	return shmem_partial_swap_usage(mapping,
+			linear_page_index(vma, vma->vm_start),
+			linear_page_index(vma, vma->vm_end));
+}
+
 /*
  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
  */
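For orientation, below is a minimal, hypothetical sketch of how a caller (for example, /proc/<pid>/smaps-style accounting) might consume the helper added above. The wrapper name vma_shmem_swap_bytes() and its placement are assumptions for illustration only and are not part of this commit; shmem_swap_usage() is assumed to be declared in <linux/shmem_fs.h>.

#include <linux/mm.h>
#include <linux/shmem_fs.h>

/* Hypothetical caller: report how much of a shmem-backed VMA sits in swap. */
static unsigned long vma_shmem_swap_bytes(struct vm_area_struct *vma)
{
	unsigned long swap_bytes = 0;

	/* Only shmem/tmpfs mappings track their swapped-out pages this way. */
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping))
		swap_bytes = shmem_swap_usage(vma);

	return swap_bytes;	/* already in bytes, per the helper's comment */
}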
@@ -3064,7 +3145,7 @@ static int shmem_init_inodecache(void)
 {
 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
 				sizeof(struct shmem_inode_info),
-				0, SLAB_PANIC, shmem_init_inode);
+				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
 	return 0;
 }
 
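The second hunk's only functional change is adding SLAB_ACCOUNT to the shmem inode cache, so that (with kernel memory accounting enabled) shmem inodes allocated from the cache are charged to the memory cgroup of the allocating task. Below is a minimal, hypothetical illustration of the flag on an unrelated cache; the names are made up and not part of the commit.

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_obj {
	unsigned long payload;
};

static struct kmem_cache *demo_cachep;

static int demo_cache_init(void)
{
	/* SLAB_ACCOUNT: objects from this cache are charged to the memcg of
	 * whichever task calls kmem_cache_alloc(), mirroring the change to
	 * shmem_inode_cache above. */
	demo_cachep = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
					0, SLAB_ACCOUNT, NULL);
	return demo_cachep ? 0 : -ENOMEM;
}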