Merge 5d30bcacd9 ("Merge tag '9p-for-5.7-2' of git://github.com/martinetd/linux") into android-mainline

Baby steps on the way to 5.7-rc1

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I89095a90046a14eab189aab257a75b3dfdb5b1db
Greg Kroah-Hartman, 2020-04-10 11:53:08 +02:00
611 changed files with 27588 additions and 10137 deletions

mm/shmem.c

@@ -410,7 +410,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
#define SHMEM_HUGE_DENY (-1)
#define SHMEM_HUGE_FORCE (-2)
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */
static int shmem_huge __read_mostly;
@@ -580,7 +580,7 @@ static long shmem_unused_huge_count(struct super_block *sb,
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
return READ_ONCE(sbinfo->shrinklist_len);
}
-#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
#define shmem_huge SHMEM_HUGE_DENY
@@ -589,11 +589,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
{
return 0;
}
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
-if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
(shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
shmem_huge != SHMEM_HUGE_DENY)
return true;
@@ -788,6 +788,32 @@ void shmem_unlock_mapping(struct address_space *mapping)
}
}
+/*
+* Check whether a hole-punch or truncation needs to split a huge page,
+* returning true if no split was required, or the split has been successful.
+*
+* Eviction (or truncation to 0 size) should never need to split a huge page;
+* but in rare cases might do so, if shmem_undo_range() failed to trylock on
+* head, and then succeeded to trylock on tail.
+*
+* A split can only succeed when there are no additional references on the
+* huge page: so the split below relies upon find_get_entries() having stopped
+* when it found a subpage of the huge page, without getting further references.
+*/
+static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
+{
+if (!PageTransCompound(page))
+return true;
+/* Just proceed to delete a huge page wholly within the range punched */
+if (PageHead(page) &&
+page->index >= start && page->index + HPAGE_PMD_NR <= end)
+return true;
+/* Try to split huge page, so we can truly punch the hole or truncate */
+return split_huge_page(page) >= 0;
+}
/*
* Remove range of pages and swap entries from page cache, and free them.
* If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
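For context: the path patched in this hunk is reached when user space truncates or punches a hole in a tmpfs file, and with a huge= tmpfs mount the affected range may be backed by a transparent huge page that the new shmem_punch_compound() then tries to split. Below is a minimal user-space sketch (not part of this commit) that exercises that path; the /mnt/huge-tmpfs mount point and its huge=always option are assumptions made only for illustration.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed setup: mount -t tmpfs -o huge=always tmpfs /mnt/huge-tmpfs */
	int fd = open("/mnt/huge-tmpfs/demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char buf[4096];
	int i;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 'x', sizeof(buf));
	for (i = 0; i < 2048; i++)	/* populate 8 MB of file data */
		if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
			break;

	/*
	 * Punch a hole that covers only part of a 2 MB extent: if that extent
	 * is a huge page, shmem_undo_range() must split it (or fall back to
	 * zeroing the partial range) before truncating individual pages.
	 */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      3 * 1024 * 1024, 1024 * 1024) < 0)
		perror("fallocate");

	close(fd);
	return 0;
}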
@@ -838,31 +864,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (!trylock_page(page))
continue;
-if (PageTransTail(page)) {
-/* Middle of THP: zero out the page */
-clear_highpage(page);
-unlock_page(page);
-continue;
-} else if (PageTransHuge(page)) {
-if (index == round_down(end, HPAGE_PMD_NR)) {
-/*
-* Range ends in the middle of THP:
-* zero out the page
-*/
-clear_highpage(page);
-unlock_page(page);
-continue;
-}
-index += HPAGE_PMD_NR - 1;
-i += HPAGE_PMD_NR - 1;
-}
-if (!unfalloc || !PageUptodate(page)) {
-VM_BUG_ON_PAGE(PageTail(page), page);
-if (page_mapping(page) == mapping) {
-VM_BUG_ON_PAGE(PageWriteback(page), page);
+if ((!unfalloc || !PageUptodate(page)) &&
+page_mapping(page) == mapping) {
+VM_BUG_ON_PAGE(PageWriteback(page), page);
+if (shmem_punch_compound(page, start, end))
truncate_inode_page(mapping, page);
-}
}
unlock_page(page);
}
@@ -936,43 +942,25 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
lock_page(page);
-if (PageTransTail(page)) {
-/* Middle of THP: zero out the page */
-clear_highpage(page);
-unlock_page(page);
-/*
-* Partial thp truncate due 'start' in middle
-* of THP: don't need to look on these pages
-* again on !pvec.nr restart.
-*/
-if (index != round_down(end, HPAGE_PMD_NR))
-start++;
-continue;
-} else if (PageTransHuge(page)) {
-if (index == round_down(end, HPAGE_PMD_NR)) {
-/*
-* Range ends in the middle of THP:
-* zero out the page
-*/
-clear_highpage(page);
-unlock_page(page);
-continue;
-}
-index += HPAGE_PMD_NR - 1;
-i += HPAGE_PMD_NR - 1;
-}
if (!unfalloc || !PageUptodate(page)) {
-VM_BUG_ON_PAGE(PageTail(page), page);
-if (page_mapping(page) == mapping) {
-VM_BUG_ON_PAGE(PageWriteback(page), page);
-truncate_inode_page(mapping, page);
-} else {
+if (page_mapping(page) != mapping) {
/* Page was replaced by swap: retry */
unlock_page(page);
index--;
break;
}
+VM_BUG_ON_PAGE(PageWriteback(page), page);
+if (shmem_punch_compound(page, start, end))
+truncate_inode_page(mapping, page);
+else {
+/* Wipe the page and don't get stuck */
+clear_highpage(page);
+flush_dcache_page(page);
+set_page_dirty(page);
+if (index <
+round_up(start, HPAGE_PMD_NR))
+start = index + 1;
+}
}
unlock_page(page);
}
@@ -1059,7 +1047,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
* Part of the huge page can be beyond i_size: subject
* to shrink under memory pressure.
*/
-if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
+if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
spin_lock(&sbinfo->shrinklist_lock);
/*
* _careful to defend against unlocked access to
@@ -1472,9 +1460,6 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
pgoff_t hindex;
struct page *page;
-if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
-return NULL;
hindex = round_down(index, HPAGE_PMD_NR);
if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
XA_PRESENT))
@@ -1486,6 +1471,8 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
shmem_pseudo_vma_destroy(&pvma);
if (page)
prep_transhuge_page(page);
+else
+count_vm_event(THP_FILE_FALLBACK);
return page;
}
@@ -1511,7 +1498,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
int nr;
int err = -ENOSPC;
-if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
huge = false;
nr = huge ? HPAGE_PMD_NR : 1;
@@ -1813,17 +1800,20 @@ repeat:
if (shmem_huge == SHMEM_HUGE_FORCE)
goto alloc_huge;
switch (sbinfo->huge) {
-loff_t i_size;
-pgoff_t off;
case SHMEM_HUGE_NEVER:
goto alloc_nohuge;
-case SHMEM_HUGE_WITHIN_SIZE:
+case SHMEM_HUGE_WITHIN_SIZE: {
+loff_t i_size;
+pgoff_t off;
off = round_up(index, HPAGE_PMD_NR);
i_size = round_up(i_size_read(inode), PAGE_SIZE);
if (i_size >= HPAGE_PMD_SIZE &&
i_size >> PAGE_SHIFT >= off)
goto alloc_huge;
-/* fallthrough */
+fallthrough;
+}
case SHMEM_HUGE_ADVISE:
if (sgp_huge == SGP_HUGE)
goto alloc_huge;
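Two cleanups meet in the hunk above: the switch-local variables move into a braced block so they exist only for the SHMEM_HUGE_WITHIN_SIZE case, and the old /* fallthrough */ comment becomes the kernel's fallthrough pseudo-keyword, a macro that expands to the compiler's fallthrough attribute. A standalone sketch of the same pattern outside the kernel (the enum, the helper and its arguments are made up for illustration; assumes GCC 7+ or a recent Clang):

#include <stdio.h>

/* Close to what the kernel's fallthrough macro expands to on these compilers. */
#define fallthrough __attribute__((__fallthrough__))

enum huge_mode { HUGE_NEVER, HUGE_WITHIN_SIZE, HUGE_ADVISE };

/* Made-up helper: decide whether a huge allocation should be attempted. */
static int want_huge(enum huge_mode mode, long index, long size_pages, int advised)
{
	switch (mode) {
	case HUGE_NEVER:
		return 0;
	case HUGE_WITHIN_SIZE: {
		/* Scoped to this case: cannot be read uninitialized elsewhere. */
		long off = index + 511;	/* stand-in for round_up(index, HPAGE_PMD_NR) */

		if (size_pages >= off)
			return 1;
		fallthrough;		/* explicit: also honour the ADVISE policy */
	}
	case HUGE_ADVISE:
		return advised;
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d\n", want_huge(HUGE_WITHIN_SIZE, 0, 512, 0));	/* prints 1 */
	return 0;
}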
@@ -1871,8 +1861,13 @@ alloc_nohuge:
error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
PageTransHuge(page));
-if (error)
+if (error) {
+if (PageTransHuge(page)) {
+count_vm_event(THP_FILE_FALLBACK);
+count_vm_event(THP_FILE_FALLBACK_CHARGE);
+}
goto unacct;
+}
error = shmem_add_to_page_cache(page, mapping, hindex,
NULL, gfp & GFP_RECLAIM_MASK);
if (error) {
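The THP_FILE_FALLBACK and THP_FILE_FALLBACK_CHARGE events counted above surface in /proc/vmstat; on kernels carrying this change the fields are expected to appear as thp_file_fallback and thp_file_fallback_charge (an assumption worth verifying on the running kernel). A small sketch that dumps the file-THP counters:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[256];

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	/* Print every counter in the thp_file_* family, e.g. thp_file_fallback. */
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "thp_file_", strlen("thp_file_")) == 0)
			fputs(line, stdout);
	fclose(f);
	return 0;
}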
@@ -2089,7 +2084,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
get_area = current->mm->get_unmapped_area;
addr = get_area(file, uaddr, len, pgoff, flags);
-if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
+if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
return addr;
if (IS_ERR_VALUE(addr))
return addr;
@@ -2228,7 +2223,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
-if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
(vma->vm_end & HPAGE_PMD_MASK)) {
khugepaged_enter(vma, vma->vm_flags);
@@ -3113,12 +3108,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
error = security_inode_init_security(inode, dir, &dentry->d_name,
shmem_initxattrs, NULL);
-if (error) {
-if (error != -EOPNOTSUPP) {
-iput(inode);
-return error;
-}
-error = 0;
-}
+if (error && error != -EOPNOTSUPP) {
+iput(inode);
+return error;
+}
inode->i_size = len-1;
@@ -3456,7 +3448,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
case Opt_huge:
ctx->huge = result.uint_32;
if (ctx->huge != SHMEM_HUGE_NEVER &&
-!(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+!(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
has_transparent_hugepage()))
goto unsupported_parameter;
ctx->seen |= SHMEM_SEEN_HUGE;
@@ -3602,7 +3594,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
seq_printf(seq, ",gid=%u",
from_kgid_munged(&init_user_ns, sbinfo->gid));
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
if (sbinfo->huge)
seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
@@ -3847,7 +3839,7 @@ static const struct super_operations shmem_ops = {
.evict_inode = shmem_evict_inode,
.drop_inode = generic_delete_inode,
.put_super = shmem_put_super,
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
.nr_cached_objects = shmem_unused_huge_count,
.free_cached_objects = shmem_unused_huge_scan,
#endif
@@ -3909,7 +3901,7 @@ int __init shmem_init(void)
goto out1;
}
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
else
@@ -3925,7 +3917,7 @@ out2:
return error;
}
-#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -3977,9 +3969,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
struct kobj_attribute shmem_enabled_attr =
__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool shmem_huge_enabled(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
@@ -4005,7 +3997,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
if (i_size >= HPAGE_PMD_SIZE &&
i_size >> PAGE_SHIFT >= off)
return true;
-/* fall through */
+fallthrough;
case SHMEM_HUGE_ADVISE:
/* TODO: implement fadvise() hints */
return (vma->vm_flags & VM_HUGEPAGE);
@@ -4014,7 +4006,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
return false;
}
}
-#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_SHMEM */
@@ -4183,7 +4175,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
vma->vm_file = file;
vma->vm_ops = &shmem_vm_ops;
-if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
+if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
(vma->vm_end & HPAGE_PMD_MASK)) {
khugepaged_enter(vma, vma->vm_flags);