From 86ee4a531ea09ad12f2328366801bfb375d229b8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 16 Apr 2019 15:45:19 +0200
Subject: [PATCH] FROMLIST: x86/mm: add speculative pagefault handling

Try a speculative fault before acquiring mmap_sem, if it returns with
VM_FAULT_RETRY continue with the mmap_sem acquisition and do the
traditional fault.

Signed-off-by: Peter Zijlstra (Intel)
[Clearing of FAULT_FLAG_ALLOW_RETRY is now done in handle_speculative_fault()]
[Retry with usual fault path in the case VM_ERROR is returned by handle_speculative_fault(). This allows signal to be delivered]
[Don't build SPF call if !CONFIG_SPECULATIVE_PAGE_FAULT]
[Handle memory protection key fault]
Signed-off-by: Laurent Dufour
Link: https://lore.kernel.org/patchwork/patch/1062684/
Bug: 161210518
Signed-off-by: Suren Baghdasaryan
Change-Id: If994d027e8602d8d647dfe560c7ac68b49baf2f5
---
 arch/x86/mm/fault.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 82bf37a5c9ec..465f15bedb0c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1214,7 +1214,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 			unsigned long hw_error_code,
 			unsigned long address)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	vm_fault_t fault;
@@ -1298,6 +1298,16 @@ void do_user_addr_fault(struct pt_regs *regs,
 	}
 #endif
 
+	/*
+	 * Do not try to do a speculative page fault if the fault was due to
+	 * protection keys since it can't be resolved.
+	 */
+	if (!(hw_error_code & X86_PF_PK)) {
+		fault = handle_speculative_fault(mm, address, flags, &vma);
+		if (fault != VM_FAULT_RETRY)
+			goto done;
+	}
+
 	/*
	 * Kernel-mode access to the user address space should only occur
	 * on well-defined single instructions listed in the exception
@@ -1391,6 +1401,8 @@ good_area:
 	}
 
 	mmap_read_unlock(mm);
+
+done:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, hw_error_code, address, fault);
 		return;
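
For readers following along outside the tree, here is a minimal userspace
sketch of the retry shape this patch introduces: attempt the speculative
path first, and fall back to the classic locked walk only when it reports
a retry. The enum and the two helper stubs (try_speculative(),
locked_fault()) are hypothetical stand-ins for vm_fault_t,
handle_speculative_fault() and the mmap_sem-protected path; only the
control flow mirrors the hunk above.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's vm_fault_t result codes. */
enum fault_result { FAULT_OK, FAULT_RETRY, FAULT_ERROR };

/*
 * Sketch of the speculative path: it either resolves the fault without
 * the mmap lock or asks the caller to retry under the lock. The pkey
 * check mirrors the patch skipping SPF for X86_PF_PK faults; the
 * address-based outcome is a placeholder, not real policy.
 */
static enum fault_result try_speculative(unsigned long addr, int pkey_fault)
{
	if (pkey_fault)
		return FAULT_RETRY;
	return (addr & 1) ? FAULT_RETRY : FAULT_OK;
}

/* Sketch of the traditional path taken with mmap_sem held. */
static enum fault_result locked_fault(unsigned long addr)
{
	/* mmap_read_lock(mm); ... walk the VMAs ... mmap_read_unlock(mm); */
	(void)addr;
	return FAULT_OK;
}

/* The fallback shape the patch wires into do_user_addr_fault(). */
static enum fault_result handle_fault(unsigned long addr, int pkey_fault)
{
	enum fault_result fault = try_speculative(addr, pkey_fault);

	if (fault != FAULT_RETRY)	/* speculative path settled it */
		return fault;
	return locked_fault(addr);	/* fall back to the classic path */
}

int main(void)
{
	printf("even addr  -> %d\n", handle_fault(0x1000, 0));
	printf("odd addr   -> %d\n", handle_fault(0x1001, 0));
	printf("pkey fault -> %d\n", handle_fault(0x1000, 1));
	return 0;
}

Note the important property: anything other than VM_FAULT_RETRY, success
or error, short-circuits. That is why the hunk jumps to the shared done:
label, so VM_FAULT_ERROR from the speculative path still reaches the
existing mm_fault_error() handling and signals can be delivered.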