Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "155 patches.

  Subsystems affected by this patch series: mm (dax, debug, thp,
  readahead, page-poison, util, memory-hotplug, zram, cleanups), misc,
  core-kernel, get_maintainer, MAINTAINERS, lib, bitops, checkpatch,
  binfmt, ramfs, autofs, nilfs, rapidio, panic, relay, kgdb, ubsan,
  romfs, and fault-injection"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (155 commits)
  lib, uaccess: add failure injection to usercopy functions
  lib, include/linux: add usercopy failure capability
  ROMFS: support inode blocks calculation
  ubsan: introduce CONFIG_UBSAN_LOCAL_BOUNDS for Clang
  sched.h: drop in_ubsan field when UBSAN is in trap mode
  scripts/gdb/tasks: add headers and improve spacing format
  scripts/gdb/proc: add struct mount & struct super_block addr in lx-mounts command
  kernel/relay.c: drop unneeded initialization
  panic: dump registers on panic_on_warn
  rapidio: fix the missed put_device() for rio_mport_add_riodev
  rapidio: fix error handling path
  nilfs2: fix some kernel-doc warnings for nilfs2
  autofs: harden ioctl table
  ramfs: fix nommu mmap with gaps in the page cache
  mm: remove the now-unnecessary mmget_still_valid() hack
  mm/gup: take mmap_lock in get_dump_page()
  binfmt_elf, binfmt_elf_fdpic: use a VMA list snapshot
  coredump: rework elf/elf_fdpic vma_dump_size() into common helper
  coredump: refactor page range dumping into common helper
  coredump: let dump_emit() bail out on short writes
  ...
 mm/filemap.c | 58
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -249,7 +249,7 @@ static void page_cache_free_page(struct address_space *mapping,
 		freepage(page);
 
 	if (PageTransHuge(page) && !PageHuge(page)) {
-		page_ref_sub(page, HPAGE_PMD_NR);
+		page_ref_sub(page, thp_nr_pages(page));
 		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
 	} else {
 		put_page(page);
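
Note (not part of the patch): page_cache_free_page() previously assumed every
transparent huge page was PMD-sized and dropped a fixed HPAGE_PMD_NR
references; thp_nr_pages() returns the page's actual subpage count, so
arbitrary-order THPs now release the right number of references. A minimal
sketch of what the helper evaluates to (illustrative name, not the kernel's
implementation):

	/* 1 for an order-0 page, 1 << order for a compound head page */
	static inline int sketch_nr_pages(struct page *page)
	{
		if (!PageCompound(page))
			return 1;
		return 1 << compound_order(page);
	}
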
@@ -829,13 +829,12 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
 noinline int __add_to_page_cache_locked(struct page *page,
 					struct address_space *mapping,
-					pgoff_t offset, gfp_t gfp_mask,
+					pgoff_t offset, gfp_t gfp,
 					void **shadowp)
 {
 	XA_STATE(xas, &mapping->i_pages, offset);
 	int huge = PageHuge(page);
 	int error;
-	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -846,25 +845,46 @@ noinline int __add_to_page_cache_locked(struct page *page,
 	page->index = offset;
 
 	if (!huge) {
-		error = mem_cgroup_charge(page, current->mm, gfp_mask);
+		error = mem_cgroup_charge(page, current->mm, gfp);
 		if (error)
 			goto error;
 	}
 
+	gfp &= GFP_RECLAIM_MASK;
+
 	do {
+		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
+		void *entry, *old = NULL;
+
+		if (order > thp_order(page))
+			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
+					order, gfp);
 		xas_lock_irq(&xas);
-		old = xas_load(&xas);
-		if (old && !xa_is_value(old))
-			xas_set_err(&xas, -EEXIST);
+		xas_for_each_conflict(&xas, entry) {
+			old = entry;
+			if (!xa_is_value(entry)) {
+				xas_set_err(&xas, -EEXIST);
+				goto unlock;
+			}
+		}
+
+		if (old) {
+			if (shadowp)
+				*shadowp = old;
+			/* entry may have been split before we acquired lock */
+			order = xa_get_order(xas.xa, xas.xa_index);
+			if (order > thp_order(page)) {
+				xas_split(&xas, old, order);
+				xas_reset(&xas);
+			}
+		}
+
 		xas_store(&xas, page);
 		if (xas_error(&xas))
 			goto unlock;
 
-		if (xa_is_value(old)) {
+		if (old)
 			mapping->nrexceptional--;
-			if (shadowp)
-				*shadowp = old;
-		}
 		mapping->nrpages++;
 
 		/* hugetlb pages do not participate in page cache accounting */
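
Note (not part of the patch): the insertion path above no longer peeks at a
single slot with xas_load(); it walks every entry that conflicts with the
range the new page will claim, so a shadow entry of higher order than the
page gets split before the store. A sketch of the conflict-walk idiom with
illustrative names (sketch_store_page, new_page), to be called with the
xa_lock held:

	static int sketch_store_page(struct xa_state *xas,
				     struct page *new_page, void **shadowp)
	{
		void *entry, *old = NULL;

		xas_for_each_conflict(xas, entry) {
			if (!xa_is_value(entry))
				return -EEXIST;	/* a real page is already there */
			old = entry;		/* remember the shadow entry */
		}
		if (old && shadowp)
			*shadowp = old;
		xas_store(xas, new_page);	/* replaces all conflicting entries */
		return xas_error(xas);
	}
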
@@ -872,7 +892,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
 			__inc_lruvec_page_state(page, NR_FILE_PAGES);
 unlock:
 		xas_unlock_irq(&xas);
-	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
+	} while (xas_nomem(&xas, gfp));
 
 	if (xas_error(&xas)) {
 		error = xas_error(&xas);
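
Note (not part of the patch): the gfp_mask-to-gfp rename goes with the
earlier hunk, where the mask is now reduced with GFP_RECLAIM_MASK once before
the loop instead of on every retry. The loop itself is the usual XArray
allocate-and-retry pattern; a minimal sketch, assuming xas, page and gfp are
set up as in the function above:

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, page);		/* may fail with -ENOMEM */
		xas_unlock_irq(&xas);
		/* xas_nomem() allocates the missing node, then asks for a retry */
	} while (xas_nomem(&xas, gfp));
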
@@ -1425,7 +1445,7 @@ static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
  * unlock_page - unlock a locked page
  * @page: the page
  *
- * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
+ * Unlocks the page and wakes up sleepers in wait_on_page_locked().
  * Also wakes sleepers in wait_on_page_writeback() because the wakeup
  * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
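
Note (not part of the patch): the kerneldoc above describes the wait queue
shared between PG_locked and PG_writeback waiters. A minimal sketch of the
pairing it documents (illustrative function names; the page-lock calls are
the real kernel API):

	static void producer(struct page *page)
	{
		lock_page(page);	/* take PG_locked, may sleep */
		/* ... fill the page ... */
		unlock_page(page);	/* clear PG_locked and wake sleepers */
	}

	static void consumer(struct page *page)
	{
		wait_on_page_locked(page);	/* sleep until PG_locked clears */
	}
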
@@ -2568,8 +2588,8 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct file *file = vmf->vma->vm_file;
 	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
-	pgoff_t offset = vmf->pgoff;
 	unsigned int mmap_miss;
 
 	/* If we don't want any read-ahead, don't bother */
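
Note (not part of the patch): DEFINE_READAHEAD() is introduced elsewhere in
this same patch series; assuming the version merged here, it expands to
roughly a designated initializer like:

	struct readahead_control ractl = {
		.file = file,
		.mapping = mapping,
		._index = vmf->pgoff,
	};
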
@@ -2580,8 +2600,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 
 	if (vmf->vma->vm_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-		page_cache_sync_readahead(mapping, ra, file, offset,
-					  ra->ra_pages);
+		page_cache_sync_ra(&ractl, ra, ra->ra_pages);
 		return fpin;
 	}
 
@@ -2601,10 +2620,11 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	 * mmap read-around
 	 */
 	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
+	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
 	ra->size = ra->ra_pages;
 	ra->async_size = ra->ra_pages / 4;
-	ra_submit(ra, mapping, file);
+	ractl._index = ra->start;
+	do_page_cache_ra(&ractl, ra->size, ra->async_size);
 	return fpin;
 }
 
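
Note (not part of the patch): the removed ra_submit() wrapper performed the
same two steps the new code spells out inline; a sketch of that equivalence,
using only calls visible in this diff:

	static void sketch_ra_submit(struct readahead_control *ractl,
				     struct file_ra_state *ra)
	{
		ractl->_index = ra->start;
		do_page_cache_ra(ractl, ra->size, ra->async_size);
	}
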
@@ -2984,7 +3004,7 @@ filler:
 		goto out;
 
 	/*
-	 * Page is not up to date and may be locked due one of the following
+	 * Page is not up to date and may be locked due to one of the following
 	 * case a: Page is being filled and the page lock is held
 	 * case b: Read/write error clearing the page uptodate status
 	 * case c: Truncation in progress (page locked)