ANDROID: mm: allow limited speculative page faulting in do_swap_page()

Speculative page fault handling was disabled in do_swap_page() because it
is unsafe to call migration_entry_wait() without holding mmap_lock. Other
calls that are unsafe without mmap_lock are ksm_might_need_to_copy(),
which relies on the VMA being stable, and swap readahead. However, if we
avoid these cases, the rest of the path appears to be safe. Relax the
check to bail out only in these unsafe cases and allow speculation
otherwise.

Bug: 322762567
Change-Id: Ic1fda0a5549088d5f37004dbacf3193116a5f868
Signed-off-by: Suren Baghdasaryan <surenb@google.com>

@@ -3623,8 +3623,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	void *shadow = NULL;
 
 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+		/* ksm_might_need_to_copy() needs a stable VMA, spf can't be used */
+#ifdef CONFIG_KSM
 		pte_unmap(vmf->pte);
 		return VM_FAULT_RETRY;
+#endif
 	}
 
 	ret = pte_unmap_same(vmf);
@@ -3641,6 +3644,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 	entry = pte_to_swp_entry(vmf->orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+			ret = VM_FAULT_RETRY;
+			goto out;
+		}
 		if (is_migration_entry(entry)) {
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
 					     vmf->address);
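
For readers who want the shape of the resulting control flow rather than the raw
hunks, below is a condensed sketch of the speculative-fault checks in
do_swap_page() after this change. It is not the verbatim kernel function:
everything outside the two FAULT_FLAG_SPECULATIVE checks is elided or
paraphrased in comments, and the surrounding swap-in logic is assumed unchanged.

vm_fault_t do_swap_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret = 0;
	swp_entry_t entry;

	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
		/*
		 * ksm_might_need_to_copy() needs a stable VMA, so with KSM
		 * enabled a speculative fault must retry under mmap_lock.
		 */
#ifdef CONFIG_KSM
		pte_unmap(vmf->pte);
		return VM_FAULT_RETRY;
#endif
	}

	/* ... pte_unmap_same(), swap cache lookup, etc. (elided) ... */

	entry = pte_to_swp_entry(vmf->orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		/*
		 * migration_entry_wait() and the other non-swap-entry paths
		 * are unsafe without mmap_lock; bail out of the speculative
		 * fault and let the caller retry the normal way.
		 */
		if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
			ret = VM_FAULT_RETRY;
			goto out;
		}
		/* ... is_migration_entry() / other non-swap entry handling ... */
	}

	/* ... the remaining swap-in path is allowed to run speculatively ... */
out:
	return ret;
}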