BACKPORT: FROMGIT: userfaultfd: support minor fault handling for shmem
Patch series "userfaultfd: support minor fault handling for shmem", v2.

Overview
========

See my original series [1] for a detailed overview of minor fault handling
in general. The feature in this series works exactly like the hugetlbfs
version (from userspace's perspective).

I'm sending this as a separate series because:

- The original minor fault handling series has a full set of R-Bs, and
  seems close to being merged. So, it seems reasonable to start looking at
  this next step, which extends the basic functionality.

- shmem is different enough that this series may require some additional
  work before it's ready, and I don't want to delay the original series
  unnecessarily by bundling them together.

Use Case
========

In some cases it is useful to have VM memory backed by tmpfs instead of
hugetlbfs. So, this feature will be used to support the same VM live
migration use case described in my original series.

Additionally, Android folks (Lokesh Gidra <lokeshgidra@google.com>) hope
to optimize the Android Runtime garbage collector using this feature:

"The plan is to use userfaultfd for concurrently compacting the heap. With
this feature, the heap can be shared-mapped at another location where the
GC-thread(s) could continue the compaction operation without the need to
invoke userfault ioctl(UFFDIO_COPY) each time. OTOH, if and when Java
threads get faults on the heap, UFFDIO_CONTINUE can be used to resume
execution. Furthermore, this feature enables updating references in the
'non-moving' portion of the heap efficiently. Without this feature,
unnecessary page copying (ioctl(UFFDIO_COPY)) would be required."

[1] https://lore.kernel.org/linux-fsdevel/20210301222728.176417-1-axelrasmussen@google.com/T/#t

This patch (of 5):

Modify the userfaultfd register API to allow registering shmem VMAs in
minor mode. Modify the shmem mcopy implementation to support
UFFDIO_CONTINUE in order to resolve such faults.

Combine the shmem mcopy handler functions into a single
shmem_mcopy_atomic_pte, which takes a mode parameter. This matches how the
hugetlbfs implementation is structured, and lets us remove a good chunk of
boilerplate.

Link: https://lkml.kernel.org/r/20210302000133.272579-1-axelrasmussen@google.com
Link: https://lkml.kernel.org/r/20210302000133.272579-2-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Wang Qing <wangqing@vivo.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: Cannon Matthews <cannonmatthews@google.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Oliver Upton <oupton@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
(cherry picked from commit 4cc6e15679966aa49afc5b114c3c83ba0ac39b05
 https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git akpm)
Link: https://lore.kernel.org/patchwork/patch/1388146/
Conflicts: mm/shmem.c
(1. Manual rebase
 2. Enclosed shmem_mcopy_atomic_pte() with CONFIG_USERFAULTFD to avoid
    compile errors when USERFAULTFD is not enabled.)
Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
Bug: 160737021
Bug: 169683130
Change-Id: Idcd822b2a124a089121b9ad8c65061f6979126ec
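For context, here is a minimal userspace sketch (not part of the patch) of the
workflow this series enables: a shmem/tmpfs page is brought into the page cache
through one mapping, and a second mapping of the same file, registered in minor
mode, has its PTE installed with UFFDIO_CONTINUE instead of UFFDIO_COPY. It
assumes a kernel with this series applied, so that UFFDIO_REGISTER_MODE_MINOR
and UFFDIO_CONTINUE are exposed by <linux/userfaultfd.h>; feature negotiation
and most error handling are trimmed for brevity.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);

	/* One page of shmem, mapped twice: "alias" fills the page cache,
	 * "primary" is the uffd-registered mapping left unpopulated. */
	int memfd = memfd_create("uffd-minor-shmem", 0);
	ftruncate(memfd, psize);
	char *primary = mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED, memfd, 0);
	char *alias = mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED, memfd, 0);

	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);	/* a real user would check api.features */

	/* Register the primary mapping for minor faults only. */
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)primary, .len = psize },
		.mode = UFFDIO_REGISTER_MODE_MINOR,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg)) {
		perror("UFFDIO_REGISTER");
		return 1;
	}

	/* Populate the page cache through the alias; primary still has no
	 * PTE, so any access there would raise a minor fault. */
	strcpy(alias, "hello from the page cache");

	/* Resolve by installing a PTE for the already-present page; no copy.
	 * A real monitor would do this after reading a UFFD_EVENT_PAGEFAULT
	 * message that carries the minor-fault flag. */
	struct uffdio_continue cont = {
		.range = { .start = (unsigned long)primary, .len = psize },
	};
	if (ioctl(uffd, UFFDIO_CONTINUE, &cont)) {
		perror("UFFDIO_CONTINUE");
		return 1;
	}

	printf("%s\n", primary);	/* reads through the newly installed PTE */
	return 0;
}

The two mappings are the whole point: the GC thread (or migration thread) works
through the alias, and the faulting threads on the registered mapping are
resumed with a PTE install rather than a page copy.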
committed by Todd Kjos
parent 357700fcc3
commit d672123ec4

 mm/shmem.c | 94
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -77,7 +77,6 @@ static struct vfsmount *shm_mnt;
 #include <linux/syscalls.h>
 #include <linux/fcntl.h>
 #include <uapi/linux/memfd.h>
-#include <linux/userfaultfd_k.h>
 #include <linux/rmap.h>
 #include <linux/uuid.h>
 
@@ -1787,8 +1786,8 @@ unlock:
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache.
  *
- * vmf and fault_type are only supplied by shmem_fault:
- * otherwise they are NULL.
+ * vma, vmf, and fault_type are only supplied by shmem_fault: otherwise they
+ * are NULL.
  */
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
@@ -1830,6 +1829,12 @@ repeat:
 		return error;
 	}
 
+	if (page && vma && userfaultfd_minor(vma)) {
+		unlock_page(page);
+		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
+		return 0;
+	}
+
 	if (page)
 		hindex = page->index;
 	if (page && sgp == SGP_WRITE)
@@ -2357,14 +2362,13 @@ bool shmem_mapping(struct address_space *mapping)
 	return mapping->a_ops == &shmem_aops;
 }
 
-static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
-				  pmd_t *dst_pmd,
-				  struct vm_area_struct *dst_vma,
-				  unsigned long dst_addr,
-				  unsigned long src_addr,
-				  bool zeropage,
-				  struct page **pagep)
+#ifdef CONFIG_USERFAULTFD
+int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+			   struct vm_area_struct *dst_vma,
+			   unsigned long dst_addr, unsigned long src_addr,
+			   enum mcopy_atomic_mode mode, struct page **pagep)
 {
+	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct address_space *mapping = inode->i_mapping;
@@ -2381,12 +2385,17 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (!shmem_inode_acct_block(inode, 1))
 		goto out;
 
-	if (!*pagep) {
+	if (is_continue) {
+		ret = -EFAULT;
+		page = find_lock_page(mapping, pgoff);
+		if (!page)
+			goto out_unacct_blocks;
+	} else if (!*pagep) {
 		page = shmem_alloc_page(gfp, info, pgoff);
 		if (!page)
 			goto out_unacct_blocks;
 
-		if (!zeropage) {	/* mcopy_atomic */
+		if (mode == MCOPY_ATOMIC_NORMAL) {	/* mcopy_atomic */
 			page_kaddr = kmap_atomic(page);
 			ret = copy_from_user(page_kaddr,
 					     (const void __user *)src_addr,
@@ -2400,7 +2409,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 				/* don't free the page */
 				return -ENOENT;
 			}
-		} else {		/* mfill_zeropage_atomic */
+		} else {		/* zeropage */
 			clear_highpage(page);
 		}
 	} else {
@@ -2408,10 +2417,13 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 		*pagep = NULL;
 	}
 
-	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
-	__SetPageLocked(page);
-	__SetPageSwapBacked(page);
-	__SetPageUptodate(page);
+	if (!is_continue) {
+		VM_BUG_ON(PageSwapBacked(page));
+		VM_BUG_ON(PageLocked(page));
+		__SetPageLocked(page);
+		__SetPageSwapBacked(page);
+		__SetPageUptodate(page);
+	}
 
 	ret = -EFAULT;
 	offset = linear_page_index(dst_vma, dst_addr);
@@ -2419,10 +2431,13 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (unlikely(offset >= max_off))
 		goto out_release;
 
-	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
-				      gfp & GFP_RECLAIM_MASK, dst_mm);
-	if (ret)
-		goto out_release;
+	/* If page wasn't already in the page cache, add it. */
+	if (!is_continue) {
+		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+					      gfp & GFP_RECLAIM_MASK, dst_mm);
+		if (ret)
+			goto out_release;
+	}
 
 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 	if (dst_vma->vm_flags & VM_WRITE)
@@ -2449,13 +2464,15 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (!pte_none(*dst_pte))
 		goto out_release_unlock;
 
-	lru_cache_add(page);
+	if (!is_continue) {
+		lru_cache_add(page);
 
-	spin_lock_irq(&info->lock);
-	info->alloced++;
-	inode->i_blocks += BLOCKS_PER_PAGE;
-	shmem_recalc_inode(inode);
-	spin_unlock_irq(&info->lock);
+		spin_lock_irq(&info->lock);
+		info->alloced++;
+		inode->i_blocks += BLOCKS_PER_PAGE;
+		shmem_recalc_inode(inode);
+		spin_unlock_irq(&info->lock);
+	}
 
 	inc_mm_counter(dst_mm, mm_counter_file(page));
 	page_add_file_rmap(page, false);
@@ -2479,28 +2496,7 @@ out_unacct_blocks:
 	shmem_inode_unacct_blocks(inode, 1);
 	goto out;
 }
-
-int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
-			   pmd_t *dst_pmd,
-			   struct vm_area_struct *dst_vma,
-			   unsigned long dst_addr,
-			   unsigned long src_addr,
-			   struct page **pagep)
-{
-	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
-				      dst_addr, src_addr, false, pagep);
-}
-
-int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
-			     pmd_t *dst_pmd,
-			     struct vm_area_struct *dst_vma,
-			     unsigned long dst_addr)
-{
-	struct page *page = NULL;
-
-	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
-				      dst_addr, 0, true, &page);
-}
+#endif /* CONFIG_USERFAULTFD */
 
 #ifdef CONFIG_TMPFS
 static const struct inode_operations shmem_symlink_inode_operations;