FROMLIST: mm: speculative page fault handler return VMA
When the speculative page fault handler returns VM_FAULT_RETRY, there is a chance that the VMA fetched without grabbing the mmap_sem can be reused by the legacy page fault handler. By reusing it, we avoid calling find_vma() again. To achieve that, we must ensure that the VMA structure will not be freed behind our back. This is done by taking a reference on it (get_vma()) and by assuming that the caller will call the new service can_reuse_spf_vma() once it has grabbed the mmap_sem.

can_reuse_spf_vma() first checks that the VMA is still in the RB tree, then that the VMA's boundaries match the passed address, and finally releases the reference on the VMA so that it can be freed if needed. If the VMA has been freed in the meantime, can_reuse_spf_vma() will have returned false, since the VMA is no longer in the RB tree.

In the architecture page fault handler, the new service reuse_spf_or_find_vma() should be called in place of find_vma(); it handles the check on the spf_vma and, if needed, calls find_vma().

Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Change-Id: Ia56dcf807e8bddf6788fd696dd80372db35476f0
Link: https://lore.kernel.org/lkml/1523975611-15978-23-git-send-email-ldufour@linux.vnet.ibm.com/
Bug: 161210518
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
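For illustration, a minimal sketch of how an architecture page fault handler might chain the speculative and legacy paths together, assuming the helpers declared in the hunk below. do_page_fault_sketch() is a hypothetical name, and reuse_spf_or_find_vma() (introduced elsewhere in the series) is open-coded here:

/*
 * Hedged sketch, not part of this patch: try the speculative path
 * first, then fall back to the legacy path and reuse the returned
 * VMA once mmap_sem is held. Error handling is elided.
 */
static void do_page_fault_sketch(struct mm_struct *mm,
				 unsigned long address, unsigned int flags)
{
	struct vm_area_struct *vma;
	int fault;

	/* Speculative path: runs without taking mmap_sem. */
	fault = handle_speculative_fault(mm, address, flags, &vma);
	if (fault != VM_FAULT_RETRY)
		return;	/* fault handled speculatively */

	/* Legacy path: grab mmap_sem, then try to reuse the VMA. */
	down_read(&mm->mmap_sem);
	if (!vma || !can_reuse_spf_vma(vma, address))
		vma = find_vma(mm, address);

	/* ... regular fault handling on 'vma' continues here ... */
	up_read(&mm->mmap_sem);
}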
Committed by: Suren Baghdasaryan
Parent: 736ae8bde8
Commit: 99e15a0799
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1770,25 +1770,37 @@ extern int fixup_user_fault(struct mm_struct *mm,
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 extern int __handle_speculative_fault(struct mm_struct *mm,
 				      unsigned long address,
-				      unsigned int flags);
+				      unsigned int flags,
+				      struct vm_area_struct **vma);
 static inline int handle_speculative_fault(struct mm_struct *mm,
 					   unsigned long address,
-					   unsigned int flags)
+					   unsigned int flags,
+					   struct vm_area_struct **vma)
 {
 	/*
 	 * Try speculative page fault for multithreaded user space task only.
 	 */
-	if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1)
+	if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1) {
+		*vma = NULL;
 		return VM_FAULT_RETRY;
-	return __handle_speculative_fault(mm, address, flags);
+	}
+	return __handle_speculative_fault(mm, address, flags, vma);
 }
+extern bool can_reuse_spf_vma(struct vm_area_struct *vma,
+			      unsigned long address);
 #else
 static inline int handle_speculative_fault(struct mm_struct *mm,
 					   unsigned long address,
-					   unsigned int flags)
+					   unsigned int flags,
+					   struct vm_area_struct **vma)
 {
 	return VM_FAULT_RETRY;
 }
+static inline bool can_reuse_spf_vma(struct vm_area_struct *vma,
+				     unsigned long address)
+{
+	return false;
+}
 #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
 
 void unmap_mapping_pages(struct address_space *mapping,
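The body of can_reuse_spf_vma() is not part of this header hunk; as a rough sketch of the behaviour described in the commit message (the RB-node test and the put_vma() name are assumptions, not lifted from this hunk):

/*
 * Rough sketch of can_reuse_spf_vma() based on the commit message:
 * the VMA is reusable only if it is still linked in the RB tree and
 * still covers the faulting address; the speculative reference is
 * dropped either way. put_vma() is assumed from earlier in the series.
 */
bool can_reuse_spf_vma(struct vm_area_struct *vma, unsigned long address)
{
	bool ret;

	/* Still in the RB tree, and boundaries still matching? */
	ret = !RB_EMPTY_NODE(&vma->vm_rb) &&
	      vma->vm_start <= address && address < vma->vm_end;

	/* Release the reference taken by the speculative handler. */
	put_vma(vma);
	return ret;
}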