tmpfs: convert mem_cgroup shmem to radix-swap
Remove mem_cgroup_shmem_charge_fallback(): it was only required when we had to move swappage to filecache with GFP_NOWAIT. Remove the GFP_NOWAIT special case from mem_cgroup_cache_charge(), by moving its call out from shmem_add_to_page_cache() to two of its three callers. But leave it doing mem_cgroup_uncharge_cache_page() on error: although asymmetrical, it's easier for all 3 callers to handle. These two changes would also be appropriate if anyone were to start using shmem_read_mapping_page_gfp() with GFP_NOWAIT. Remove mem_cgroup_get_shmem_target(): mc_handle_file_pte() can test radix_tree_exceptional_entry() to get what it needs for itself. Signed-off-by: Hugh Dickins <hughd@google.com> Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
83
mm/shmem.c
83
mm/shmem.c
@@ -262,15 +262,11 @@ static int shmem_add_to_page_cache(struct page *page,
|
||||
struct address_space *mapping,
|
||||
pgoff_t index, gfp_t gfp, void *expected)
|
||||
{
|
||||
int error;
|
||||
int error = 0;
|
||||
|
||||
VM_BUG_ON(!PageLocked(page));
|
||||
VM_BUG_ON(!PageSwapBacked(page));
|
||||
|
||||
error = mem_cgroup_cache_charge(page, current->mm,
|
||||
gfp & GFP_RECLAIM_MASK);
|
||||
if (error)
|
||||
goto out;
|
||||
if (!expected)
|
||||
error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
|
||||
if (!error) {
|
||||
@@ -300,7 +296,6 @@ static int shmem_add_to_page_cache(struct page *page,
|
||||
}
|
||||
if (error)
|
||||
mem_cgroup_uncharge_cache_page(page);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
@@ -660,7 +655,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
|
||||
* Charge page using GFP_KERNEL while we can wait, before taking
|
||||
* the shmem_swaplist_mutex which might hold up shmem_writepage().
|
||||
* Charged back to the user (not to caller) when swap account is used.
|
||||
* shmem_add_to_page_cache() will be called with GFP_NOWAIT.
|
||||
*/
|
||||
error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
|
||||
if (error)
|
||||
@@ -954,8 +948,11 @@ repeat:
|
||||
goto failed;
|
||||
}
|
||||
|
||||
error = shmem_add_to_page_cache(page, mapping, index,
|
||||
gfp, swp_to_radix_entry(swap));
|
||||
error = mem_cgroup_cache_charge(page, current->mm,
|
||||
gfp & GFP_RECLAIM_MASK);
|
||||
if (!error)
|
||||
error = shmem_add_to_page_cache(page, mapping, index,
|
||||
gfp, swp_to_radix_entry(swap));
|
||||
if (error)
|
||||
goto failed;
|
||||
|
||||
@@ -990,8 +987,11 @@ repeat:
|
||||
|
||||
SetPageSwapBacked(page);
|
||||
__set_page_locked(page);
|
||||
error = shmem_add_to_page_cache(page, mapping, index,
|
||||
gfp, NULL);
|
||||
error = mem_cgroup_cache_charge(page, current->mm,
|
||||
gfp & GFP_RECLAIM_MASK);
|
||||
if (!error)
|
||||
error = shmem_add_to_page_cache(page, mapping, index,
|
||||
gfp, NULL);
|
||||
if (error)
|
||||
goto decused;
|
||||
lru_cache_add_anon(page);
|
||||
@@ -2442,42 +2442,6 @@ out4:
|
||||
return error;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
|
||||
/**
|
||||
* mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
|
||||
* @inode: the inode to be searched
|
||||
* @index: the page offset to be searched
|
||||
* @pagep: the pointer for the found page to be stored
|
||||
* @swapp: the pointer for the found swap entry to be stored
|
||||
*
|
||||
* If a page is found, refcount of it is incremented. Callers should handle
|
||||
* these refcount.
|
||||
*/
|
||||
void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
|
||||
struct page **pagep, swp_entry_t *swapp)
|
||||
{
|
||||
struct shmem_inode_info *info = SHMEM_I(inode);
|
||||
struct page *page = NULL;
|
||||
swp_entry_t swap = {0};
|
||||
|
||||
if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
|
||||
goto out;
|
||||
|
||||
spin_lock(&info->lock);
|
||||
#ifdef CONFIG_SWAP
|
||||
swap = shmem_get_swap(info, index);
|
||||
if (swap.val)
|
||||
page = find_get_page(&swapper_space, swap.val);
|
||||
else
|
||||
#endif
|
||||
page = find_get_page(inode->i_mapping, index);
|
||||
spin_unlock(&info->lock);
|
||||
out:
|
||||
*pagep = page;
|
||||
*swapp = swap;
|
||||
}
|
||||
#endif
|
||||
|
||||
#else /* !CONFIG_SHMEM */
|
||||
|
||||
/*
|
||||
@@ -2523,31 +2487,6 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(shmem_truncate_range);
|
||||
|
||||
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
|
||||
/**
|
||||
* mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
|
||||
* @inode: the inode to be searched
|
||||
* @index: the page offset to be searched
|
||||
* @pagep: the pointer for the found page to be stored
|
||||
* @swapp: the pointer for the found swap entry to be stored
|
||||
*
|
||||
* If a page is found, refcount of it is incremented. Callers should handle
|
||||
* these refcount.
|
||||
*/
|
||||
void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
|
||||
struct page **pagep, swp_entry_t *swapp)
|
||||
{
|
||||
struct page *page = NULL;
|
||||
|
||||
if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
|
||||
goto out;
|
||||
page = find_get_page(inode->i_mapping, index);
|
||||
out:
|
||||
*pagep = page;
|
||||
*swapp = (swp_entry_t){0};
|
||||
}
|
||||
#endif
|
||||
|
||||
#define shmem_vm_ops generic_file_vm_ops
|
||||
#define shmem_file_operations ramfs_file_operations
|
||||
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
|
||||
|
Reference in New Issue
Block a user