FROMLIST: mm: introduce __lru_cache_add_active_or_unevictable
The speculative page fault handler, which runs without holding the
mmap_sem, calls lru_cache_add_active_or_unevictable(), but the vm_flags
is not guaranteed to remain constant. Introduce
__lru_cache_add_active_or_unevictable(), which takes the vma flags
value as a parameter instead of the vma pointer.

Change-Id: I68decbe0f80847403127c45c97565e47512532e9
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Link: https://lore.kernel.org/lkml/1523975611-15978-15-git-send-email-ldufour@linux.vnet.ibm.com/
Bug: 161210518
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
committed by Suren Baghdasaryan
parent 320b684750
commit cbff8f3907
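For context: the speculative fault path is expected to snapshot the VMA
fields it depends on, including vm_flags, into struct vm_fault while the
VMA is known to be valid, and the hunks below consume that snapshot
(vmf->vma_flags) rather than dereferencing the VMA again. A minimal
sketch of that pattern; the vma_flags field of struct vm_fault comes
from earlier patches in this series, not from mainline:

	/*
	 * Sketch only: take a stable copy of vm_flags while the VMA is
	 * still known valid; the rest of the lockless fault works on
	 * the copy, since vma->vm_flags may change underneath us.
	 */
	vmf->vma_flags = READ_ONCE(vmf->vma->vm_flags);
	...
	/* later, without mmap_sem held: */
	__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);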
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,8 +351,14 @@ extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
-						struct vm_area_struct *vma);
+extern void __lru_cache_add_inactive_or_unevictable(struct page *page,
+						unsigned long vma_flags);
+
+static inline void lru_cache_add_inactive_or_unevictable(struct page *page,
+						struct vm_area_struct *vma)
+{
+	return __lru_cache_add_inactive_or_unevictable(page, vma->vm_flags);
+}
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
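The header change keeps existing callers source-compatible: paths that
hold the mmap_sem keep passing the VMA and go through the new static
inline wrapper, while the speculative path calls the double-underscore
variant directly. A usage contrast (a sketch; both calls end up in the
same function body):

	/* mmap_sem held: the VMA pointer is safe to dereference. */
	lru_cache_add_inactive_or_unevictable(page, vma);

	/* speculative path: pass the previously snapshotted flags. */
	__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);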
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2947,7 +2947,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		 */
 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
-		lru_cache_add_inactive_or_unevictable(new_page, vma);
+		__lru_cache_add_inactive_or_unevictable(new_page, vmf->vma_flags);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3489,7 +3489,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	/* ksm created a completely new copy */
 	if (unlikely(page != swapcache && swapcache)) {
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
 	} else {
 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
 	}
@@ -3639,7 +3639,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, vmf->address, false);
-	lru_cache_add_inactive_or_unevictable(page, vma);
+	__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
 setpte:
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3901,7 +3901,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
 	if (write && !(vmf->vma_flags & VM_SHARED)) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
 	} else {
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -483,14 +483,14 @@ EXPORT_SYMBOL(lru_cache_add);
  * Place @page on the inactive or unevictable LRU list, depending on its
  * evictability.
  */
-void lru_cache_add_inactive_or_unevictable(struct page *page,
-					 struct vm_area_struct *vma)
+void __lru_cache_add_inactive_or_unevictable(struct page *page,
+					 unsigned long vma_flags)
 {
 	bool unevictable;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
+	unevictable = (vma_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
 		int nr_pages = thp_nr_pages(page);
 		/*
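The evictability test itself is unchanged by this patch: a page is
treated as unevictable only when the mapping is mlocked and is not one
of the special mappings (VM_SPECIAL is VM_IO | VM_DONTEXPAND |
VM_PFNMAP | VM_MIXEDMAP in include/linux/mm.h). A worked illustration
of the bit test with hypothetical flag combinations:

	/* (vma_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED        */
	/* VM_LOCKED alone        -> true  (mlocked, ordinary mapping) */
	/* VM_LOCKED | VM_PFNMAP  -> false (special mapping wins)      */
	/* no VM_LOCKED           -> false (evictable as usual)        */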