mm: swap: clean up swap readahead

Looking at the recent swap readahead changes, I am unhappy with the current code structure, which makes do_swap_page diverge between two swap readahead algorithms. This patch cleans that up. The main motivation is that the fault handler doesn't need to be aware of the readahead algorithms; it should just call swapin_readahead. As a first step, this patch cleans up a little but is not yet complete (it is split out to make review easier); the next patch will finish the job.

[minchan@kernel.org: do not check readahead flag with THP anon]
  Link: http://lkml.kernel.org/r/874lm83zho.fsf@yhuang-dev.intel.com
Link: http://lkml.kernel.org/r/20180227232611.169883-1-minchan@kernel.org
Link: http://lkml.kernel.org/r/1509520520-32367-2-git-send-email-minchan@kernel.org
Link: http://lkml.kernel.org/r/20180220085249.151400-2-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit eaf649ebc3 (parent e830c63a62), committed by Linus Torvalds

 mm/memory.c | 26 ++++++--------------------
 1 file changed, 6 insertions(+), 20 deletions(-)
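For context, the end state this series aims for can be sketched as follows: do_swap_page() calls a single swapin_readahead() entry point, and the choice between the VMA-based and the classic cluster readahead algorithm is made inside the swap layer. This is a minimal illustrative sketch, not the kernel source: swap_vma_readahead() and swap_cluster_readahead() stand in for the two existing algorithms, and the vm_fault-based signature is assumed to be the one the follow-up patch introduces.

	/*
	 * Sketch only: after the series, the fault handler calls one
	 * entry point and the swap layer picks the algorithm itself.
	 */
	struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				      struct vm_fault *vmf)
	{
		/* VMA-based readahead when it is enabled on this system */
		if (swap_use_vma_readahead())
			return swap_vma_readahead(entry, gfp_mask, vmf);

		/* otherwise the classic swap-slot-cluster readahead */
		return swap_cluster_readahead(entry, gfp_mask, vmf);
	}

With that shape, do_swap_page() no longer needs a vma_readahead flag or a struct vma_swap_readahead on its stack, which is exactly what the hunks below start removing.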
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2883,26 +2883,16 @@ EXPORT_SYMBOL(unmap_mapping_range);
 int do_swap_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *page = NULL, *swapcache = NULL;
+	struct page *page = NULL, *swapcache;
 	struct mem_cgroup *memcg;
-	struct vma_swap_readahead swap_ra;
 	swp_entry_t entry;
 	pte_t pte;
 	int locked;
 	int exclusive = 0;
 	int ret = 0;
-	bool vma_readahead = swap_use_vma_readahead();
 
-	if (vma_readahead) {
-		page = swap_readahead_detect(vmf, &swap_ra);
-		swapcache = page;
-	}
-
-	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
-		if (page)
-			put_page(page);
+	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
 		goto out;
-	}
 
 	entry = pte_to_swp_entry(vmf->orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
@@ -2928,11 +2918,8 @@ int do_swap_page(struct vm_fault *vmf)
 
 
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
-	if (!page) {
-		page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
-					 vmf->address);
-		swapcache = page;
-	}
+	page = lookup_swap_cache(entry, vma, vmf->address);
+	swapcache = page;
 
 	if (!page) {
 		struct swap_info_struct *si = swp_swap_info(entry);
@@ -2949,9 +2936,9 @@ int do_swap_page(struct vm_fault *vmf)
 				swap_readpage(page, true);
 			}
 		} else {
-			if (vma_readahead)
+			if (swap_use_vma_readahead())
 				page = do_swap_page_readahead(entry,
-					GFP_HIGHUSER_MOVABLE, vmf, &swap_ra);
+					GFP_HIGHUSER_MOVABLE, vmf);
 			else
 				page = swapin_readahead(entry,
 					GFP_HIGHUSER_MOVABLE, vma, vmf->address);
@@ -2982,7 +2969,6 @@ int do_swap_page(struct vm_fault *vmf)
 		 */
 		ret = VM_FAULT_HWPOISON;
 		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
-		swapcache = page;
 		goto out_release;
 	}
 
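For reference, the swap_use_vma_readahead() predicate that now gates the VMA-based path directly at the call site checks, in the mm/swap_state.c of this era, whether VMA readahead is globally enabled and no rotational swap device is active. A rough sketch from memory, not a verbatim quote:

	/* Sketch (assumption): gate for the VMA-based readahead path */
	static inline bool swap_use_vma_readahead(void)
	{
		return READ_ONCE(swap_vma_readahead) &&
			!atomic_read(&nr_rotate_swap);
	}

Because the check is cheap, evaluating it at the single remaining call site costs nothing, and it removes the need to cache the result in a local vma_readahead variable at function entry.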