diff --git a/include/linux/swap.h b/include/linux/swap.h
index 667935c0dbd4..4459831d23ad 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,8 +351,14 @@ extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
-						struct vm_area_struct *vma);
+extern void __lru_cache_add_inactive_or_unevictable(struct page *page,
+						unsigned long vma_flags);
+
+static inline void lru_cache_add_inactive_or_unevictable(struct page *page,
+						struct vm_area_struct *vma)
+{
+	return __lru_cache_add_inactive_or_unevictable(page, vma->vm_flags);
+}
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
diff --git a/mm/memory.c b/mm/memory.c
index 733ce084d594..eca5654616e8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2947,7 +2947,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		 */
 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
-		lru_cache_add_inactive_or_unevictable(new_page, vma);
+		__lru_cache_add_inactive_or_unevictable(new_page, vmf->vma_flags);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3489,7 +3489,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	/* ksm created a completely new copy */
 	if (unlikely(page != swapcache && swapcache)) {
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
 	} else {
 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
 	}
@@ -3639,7 +3639,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, vmf->address, false);
-	lru_cache_add_inactive_or_unevictable(page, vma);
+	__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
 setpte:
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3901,7 +3901,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
 	if (write && !(vmf->vma_flags & VM_SHARED)) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
 	} else {
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
diff --git a/mm/swap.c b/mm/swap.c
index 47a47681c86b..d160a07830a7 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -483,14 +483,14 @@ EXPORT_SYMBOL(lru_cache_add);
  * Place @page on the inactive or unevictable LRU list, depending on its
  * evictability.
  */
-void lru_cache_add_inactive_or_unevictable(struct page *page,
-					 struct vm_area_struct *vma)
+void __lru_cache_add_inactive_or_unevictable(struct page *page,
+					 unsigned long vma_flags)
 {
 	bool unevictable;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
+	unevictable = (vma_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
 		int nr_pages = thp_nr_pages(page);
 		/*
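
Usage note (not part of the patch): callers that hold a stable struct vm_area_struct are unaffected, since the old name is now a static inline wrapper that forwards vma->vm_flags, while callers that only carry a snapshot of the VMA flags, as the fault paths above do via vmf->vma_flags, call the new worker directly. A minimal, hypothetical sketch of such a caller:

	/*
	 * Illustration only: "flags_snapshot" is a hypothetical copy of the
	 * VMA flags taken earlier by the caller; it is not a field added by
	 * this patch. With only that snapshot in hand, the flags-based worker
	 * is used instead of the vma-based wrapper.
	 */
	static void example_add_new_anon_page(struct page *page,
					      unsigned long flags_snapshot)
	{
		__lru_cache_add_inactive_or_unevictable(page, flags_snapshot);
	}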