Revert "BACKPORT: FROMGIT: userfaultfd: support minor fault handling for shmem"

This reverts commit d672123ec4, as an
updated version of the patch set will be merged later.

Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
Bug: 187930641
Change-Id: I765fe86a2dc0305482a0590c14143dee27840b8a
Authored by Lokesh Gidra on 2021-05-13 05:11:10 -07:00; committed by Suren Baghdasaryan
parent 13e14ca0e0 | commit 84330a5f50
6 changed files with 84 additions and 81 deletions (only the mm/shmem.c hunks are shown below)
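
For context, the series being reverted wired shmem into userfaultfd's minor-fault handling: a fault on a page that is already present in the shmem page cache is forwarded to userspace (the userfaultfd_minor() check removed below), and userspace resolves it with UFFDIO_CONTINUE, which installs the page-table entry. A minimal userspace sketch of that flow, using the upstream uffd uAPI names (UFFDIO_REGISTER_MODE_MINOR, UFFDIO_CONTINUE); this is illustrative only, not part of this commit, and error handling is omitted:

/* Illustrative sketch only -- not part of this commit. */
#include <linux/userfaultfd.h>
#include <stddef.h>
#include <sys/ioctl.h>

/* Ask for minor faults (page in cache, PTE absent) on a shmem range. */
static int uffd_register_minor(int uffd, void *addr, size_t len)
{
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)addr, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MINOR,
	};

	return ioctl(uffd, UFFDIO_REGISTER, &reg);
}

/* Resolve a minor fault: the cache page is ready, just map it. */
static int uffd_continue(int uffd, void *addr, size_t len)
{
	struct uffdio_continue cont = {
		.range = { .start = (unsigned long)addr, .len = len },
	};

	return ioctl(uffd, UFFDIO_CONTINUE, &cont);
}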

diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c

@@ -77,6 +77,7 @@ static struct vfsmount *shm_mnt;
 #include <linux/syscalls.h>
 #include <linux/fcntl.h>
 #include <uapi/linux/memfd.h>
+#include <linux/userfaultfd_k.h>
 #include <linux/rmap.h>
 #include <linux/uuid.h>
@@ -1786,8 +1787,8 @@ unlock:
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache.
  *
- * vma, vmf, and fault_type are only supplied by shmem_fault: otherwise they
- * are NULL.
+ * vmf and fault_type are only supplied by shmem_fault:
+ * otherwise they are NULL.
  */
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
@@ -1829,12 +1830,6 @@ repeat:
 		return error;
 	}
 
-	if (page && vma && userfaultfd_minor(vma)) {
-		unlock_page(page);
-		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
-		return 0;
-	}
-
 	if (page)
 		hindex = page->index;
 	if (page && sgp == SGP_WRITE)
@@ -2348,13 +2343,14 @@ bool shmem_mapping(struct address_space *mapping)
 	return mapping->a_ops == &shmem_aops;
 }
 
-#ifdef CONFIG_USERFAULTFD
-int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
-			   struct vm_area_struct *dst_vma,
-			   unsigned long dst_addr, unsigned long src_addr,
-			   enum mcopy_atomic_mode mode, struct page **pagep)
+static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
+				  pmd_t *dst_pmd,
+				  struct vm_area_struct *dst_vma,
+				  unsigned long dst_addr,
+				  unsigned long src_addr,
+				  bool zeropage,
+				  struct page **pagep)
 {
-	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
 	struct inode *inode = file_inode(dst_vma->vm_file);
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct address_space *mapping = inode->i_mapping;
@@ -2381,17 +2377,12 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 		goto out;
 	}
 
-	if (is_continue) {
-		ret = -EFAULT;
-		page = find_lock_page(mapping, pgoff);
-		if (!page)
-			goto out_unacct_blocks;
-	} else if (!*pagep) {
+	if (!*pagep) {
 		page = shmem_alloc_page(gfp, info, pgoff);
 		if (!page)
 			goto out_unacct_blocks;
 
-		if (mode == MCOPY_ATOMIC_NORMAL) { /* mcopy_atomic */
+		if (!zeropage) { /* mcopy_atomic */
 			page_kaddr = kmap_atomic(page);
 			ret = copy_from_user(page_kaddr,
 					     (const void __user *)src_addr,
@@ -2405,7 +2396,7 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 				/* don't free the page */
 				return -ENOENT;
 			}
-		} else { /* zeropage */
+		} else { /* mfill_zeropage_atomic */
 			clear_highpage(page);
 		}
 	} else {
@@ -2413,13 +2404,10 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 		*pagep = NULL;
 	}
 
-	if (!is_continue) {
-		VM_BUG_ON(PageSwapBacked(page));
-		VM_BUG_ON(PageLocked(page));
-		__SetPageLocked(page);
-		__SetPageSwapBacked(page);
-		__SetPageUptodate(page);
-	}
+	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
+	__SetPageLocked(page);
+	__SetPageSwapBacked(page);
+	__SetPageUptodate(page);
 
 	ret = -EFAULT;
 	offset = linear_page_index(dst_vma, dst_addr);
@@ -2427,13 +2415,10 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 	if (unlikely(offset >= max_off))
 		goto out_release;
 
-	/* If page wasn't already in the page cache, add it. */
-	if (!is_continue) {
-		ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
-					      gfp & GFP_RECLAIM_MASK, dst_mm);
-		if (ret)
-			goto out_release;
-	}
+	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+				      gfp & GFP_RECLAIM_MASK, dst_mm);
+	if (ret)
+		goto out_release;
 
 	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
 	if (dst_vma->vm_flags & VM_WRITE)
@@ -2460,15 +2445,13 @@ int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 	if (!pte_none(*dst_pte))
 		goto out_release_unlock;
 
-	if (!is_continue) {
-		lru_cache_add(page);
+	lru_cache_add(page);
 
-		spin_lock_irq(&info->lock);
-		info->alloced++;
-		inode->i_blocks += BLOCKS_PER_PAGE;
-		shmem_recalc_inode(inode);
-		spin_unlock_irq(&info->lock);
-	}
+	spin_lock_irq(&info->lock);
+	info->alloced++;
+	inode->i_blocks += BLOCKS_PER_PAGE;
+	shmem_recalc_inode(inode);
+	spin_unlock_irq(&info->lock);
 
 	inc_mm_counter(dst_mm, mm_counter_file(page));
 	page_add_file_rmap(page, false);
@@ -2492,7 +2475,28 @@ out_unacct_blocks:
 	shmem_inode_unacct_blocks(inode, 1);
 	goto out;
 }
-#endif /* CONFIG_USERFAULTFD */
+
+int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
+			   pmd_t *dst_pmd,
+			   struct vm_area_struct *dst_vma,
+			   unsigned long dst_addr,
+			   unsigned long src_addr,
+			   struct page **pagep)
+{
+	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
+				      dst_addr, src_addr, false, pagep);
+}
+
+int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
+			     pmd_t *dst_pmd,
+			     struct vm_area_struct *dst_vma,
+			     unsigned long dst_addr)
+{
+	struct page *page = NULL;
+
+	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
+				      dst_addr, 0, true, &page);
+}
 
 #ifdef CONFIG_TMPFS
 static const struct inode_operations shmem_symlink_inode_operations;
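
With the revert applied, shmem is back to its pre-series shape: one exported helper per fill operation, both thin wrappers around the static shmem_mfill_atomic_pte(). From userspace, these correspond to the UFFDIO_COPY and UFFDIO_ZEROPAGE ioctls on a shmem-backed (VM_SHARED) range; a minimal sketch using the upstream uffd uAPI, illustrative only and with error handling omitted:

/* Illustrative sketch only -- not part of this commit. */
#include <linux/userfaultfd.h>
#include <stddef.h>
#include <sys/ioctl.h>

/* Resolve a missing fault by copying data into the faulting range. */
static int uffd_copy(int uffd, void *dst, void *src, size_t len)
{
	struct uffdio_copy copy = {
		.dst = (unsigned long)dst,
		.src = (unsigned long)src,
		.len = len,
	};

	return ioctl(uffd, UFFDIO_COPY, &copy);	/* -> shmem_mcopy_atomic_pte() */
}

/* Resolve a missing fault by filling the range with zeros. */
static int uffd_zeropage(int uffd, void *addr, size_t len)
{
	struct uffdio_zeropage zp = {
		.range = { .start = (unsigned long)addr, .len = len },
	};

	return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);	/* -> shmem_mfill_zeropage_pte() */
}

The trailing comments map each ioctl to the shmem helper that services it for shmem-backed VMAs in this kernel; anonymous ranges take the mcopy_atomic_pte()/mfill_zeropage_pte() paths in mm/userfaultfd.c instead.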