Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - procfs updates

 - various misc bits

 - lib/ updates

 - epoll updates

 - autofs

 - fatfs

 - a few more MM bits

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (58 commits)
  mm/page_io.c: fix polled swap page in
  checkpatch: add Co-developed-by to signature tags
  docs: fix Co-Developed-by docs
  drivers/base/platform.c: kmemleak ignore a known leak
  fs: don't open code lru_to_page()
  fs/: remove caller signal_pending branch predictions
  mm/: remove caller signal_pending branch predictions
  arch/arc/mm/fault.c: remove caller signal_pending_branch predictions
  kernel/sched/: remove caller signal_pending branch predictions
  kernel/locking/mutex.c: remove caller signal_pending branch predictions
  mm: select HAVE_MOVE_PMD on x86 for faster mremap
  mm: speed up mremap by 20x on large regions
  mm: treewide: remove unused address argument from pte_alloc functions
  initramfs: cleanup incomplete rootfs
  scripts/gdb: fix lx-version string output
  kernel/kcov.c: mark write_comp_data() as notrace
  kernel/sysctl: add panic_print into sysctl
  panic: add options to print system info when panic happens
  bfs: extra sanity checking and static inode bitmap
  exec: separate MM_ANONPAGES and RLIMIT_STACK accounting
  ...
@@ -1125,7 +1125,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
 			break;
 		}
 
-		if (unlikely(signal_pending_state(state, current))) {
+		if (signal_pending_state(state, current)) {
 			ret = -EINTR;
 			break;
 		}
 mm/gup.c | 2
@@ -727,7 +727,7 @@ retry:
 	 * If we have a pending SIGKILL, don't keep faulting pages and
 	 * potentially allocating memory.
 	 */
-	if (unlikely(fatal_signal_pending(current))) {
+	if (fatal_signal_pending(current)) {
 		ret = -ERESTARTSYS;
 		goto out;
 	}
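The caller-side unlikely() hints removed by this series are redundant: the signal_pending() family already folds the branch prediction into its own definition, so every call site inherits it. A minimal sketch of that shape, assuming the include/linux/sched/signal.h helpers of this era:

static inline int signal_pending(struct task_struct *p)
{
	/* The hint lives here, once, instead of at every call site. */
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}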
@@ -568,7 +568,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		return VM_FAULT_FALLBACK;
 	}
 
-	pgtable = pte_alloc_one(vma->vm_mm, haddr);
+	pgtable = pte_alloc_one(vma->vm_mm);
 	if (unlikely(!pgtable)) {
 		ret = VM_FAULT_OOM;
 		goto release;
@@ -702,7 +702,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		struct page *zero_page;
 		bool set;
 		vm_fault_t ret;
-		pgtable = pte_alloc_one(vma->vm_mm, haddr);
+		pgtable = pte_alloc_one(vma->vm_mm);
 		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
 		zero_page = mm_get_huge_zero_page(vma->vm_mm);
@@ -791,7 +791,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 		return VM_FAULT_SIGBUS;
 
 	if (arch_needs_pgtable_deposit()) {
-		pgtable = pte_alloc_one(vma->vm_mm, addr);
+		pgtable = pte_alloc_one(vma->vm_mm);
 		if (!pgtable)
 			return VM_FAULT_OOM;
 	}
@@ -927,7 +927,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (!vma_is_anonymous(vma))
 		return 0;
 
-	pgtable = pte_alloc_one(dst_mm, addr);
+	pgtable = pte_alloc_one(dst_mm);
 	if (unlikely(!pgtable))
 		goto out;
 
@@ -4231,7 +4231,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * If we have a pending SIGKILL, don't keep faulting pages and
 		 * potentially allocating memory.
 		 */
-		if (unlikely(fatal_signal_pending(current))) {
+		if (fatal_signal_pending(current)) {
 			remainder = 0;
 			break;
 		}
@@ -123,7 +123,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
 		pte_t *p;
 
 		if (slab_is_available())
-			p = pte_alloc_one_kernel(&init_mm, addr);
+			p = pte_alloc_one_kernel(&init_mm);
 		else
 			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
 		if (!p)
 mm/memory.c | 17
@@ -400,10 +400,10 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	}
 }
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 {
 	spinlock_t *ptl;
-	pgtable_t new = pte_alloc_one(mm, address);
+	pgtable_t new = pte_alloc_one(mm);
 	if (!new)
 		return -ENOMEM;
 
@@ -434,9 +434,9 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 	return 0;
 }
 
-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+int __pte_alloc_kernel(pmd_t *pmd)
 {
-	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
+	pte_t *new = pte_alloc_one_kernel(&init_mm);
 	if (!new)
 		return -ENOMEM;
 
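Every pte_alloc change in this merge is the same mechanical cleanup: the address argument had no remaining users, so it is dropped treewide. A sketch of the resulting prototypes, assuming the generic declarations in include/linux/mm.h and the arch pgalloc headers after this series:

/* After the cleanup: allocation no longer depends on the faulting address. */
pgtable_t pte_alloc_one(struct mm_struct *mm);
pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

/* The pte_alloc() wrapper keeps its short-circuit: it only calls
 * __pte_alloc() when the pmd is still empty. */
#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))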
@@ -2896,7 +2896,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	 *
 	 * Here we only have down_read(mmap_sem).
 	 */
-	if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
+	if (pte_alloc(vma->vm_mm, vmf->pmd))
 		return VM_FAULT_OOM;
 
 	/* See the comment in pte_alloc_one_map() */
@@ -3043,7 +3043,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
 		pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
 		spin_unlock(vmf->ptl);
 		vmf->prealloc_pte = NULL;
-	} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
+	} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
 		return VM_FAULT_OOM;
 	}
 map_pte:
@@ -3122,7 +3122,7 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 	 * related to pte entry. Use the preallocated table for that.
 	 */
 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
-		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
+		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
 		if (!vmf->prealloc_pte)
 			return VM_FAULT_OOM;
 		smp_wmb(); /* See comment in __pte_alloc() */
@@ -3360,8 +3360,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
 			start_pgoff + nr_pages - 1);
 
 	if (pmd_none(*vmf->pmd)) {
-		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
-						  vmf->address);
+		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
 		if (!vmf->prealloc_pte)
 			goto out;
 		smp_wmb(); /* See comment in __pte_alloc() */
@@ -2636,7 +2636,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 	 *
 	 * Here we only have down_read(mmap_sem).
 	 */
-	if (pte_alloc(mm, pmdp, addr))
+	if (pte_alloc(mm, pmdp))
 		goto abort;
 
 	/* See the comment in pte_alloc_one_map() */
 mm/mremap.c | 66
@@ -191,6 +191,52 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	drop_rmap_locks(vma);
 }
 
+#ifdef CONFIG_HAVE_MOVE_PMD
+static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+		  unsigned long new_addr, unsigned long old_end,
+		  pmd_t *old_pmd, pmd_t *new_pmd)
+{
+	spinlock_t *old_ptl, *new_ptl;
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t pmd;
+
+	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
+	    || old_end - old_addr < PMD_SIZE)
+		return false;
+
+	/*
+	 * The destination pmd shouldn't be established, free_pgtables()
+	 * should have released it.
+	 */
+	if (WARN_ON(!pmd_none(*new_pmd)))
+		return false;
+
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * ptlocks because exclusive mmap_sem prevents deadlock.
+	 */
+	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+	new_ptl = pmd_lockptr(mm, new_pmd);
+	if (new_ptl != old_ptl)
+		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+	/* Clear the pmd */
+	pmd = *old_pmd;
+	pmd_clear(old_pmd);
+
+	VM_BUG_ON(!pmd_none(*new_pmd));
+
+	/* Set the new pmd */
+	set_pmd_at(mm, new_addr, new_pmd, pmd);
+	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
+	spin_unlock(old_ptl);
+
+	return true;
+}
+#endif
+
 unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len,
@@ -235,8 +281,26 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			split_huge_pmd(vma, old_pmd, old_addr);
 			if (pmd_trans_unstable(old_pmd))
 				continue;
+		} else if (extent == PMD_SIZE) {
+#ifdef CONFIG_HAVE_MOVE_PMD
+			/*
+			 * If the extent is PMD-sized, try to speed the move by
+			 * moving at the PMD level if possible.
+			 */
+			bool moved;
+
+			if (need_rmap_locks)
+				take_rmap_locks(vma);
+			moved = move_normal_pmd(vma, old_addr, new_addr,
+					old_end, old_pmd, new_pmd);
+			if (need_rmap_locks)
+				drop_rmap_locks(vma);
+			if (moved)
+				continue;
+#endif
 		}
-		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
+
+		if (pte_alloc(new_vma->vm_mm, new_pmd))
 			break;
 		next = (new_addr + PMD_SIZE) & PMD_MASK;
 		if (extent > next - new_addr)
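The payoff of move_normal_pmd() is arithmetic: on x86-64 with 4 KiB pages, PMD_SIZE is 2 MiB, so one set_pmd_at() relocates a whole page-table page covering 512 PTEs instead of moving those entries one by one, which is where the ~20x figure for large regions comes from. A hypothetical user-space workload that hits the fast path (sizes and names are illustrative, not from the patch):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stddef.h>

int main(void)
{
	size_t len = 1UL << 30;	/* 1 GiB: 512 PMD entries at 2 MiB each */
	void *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED)
		return 1;
	/* MREMAP_MAYMOVE lets the kernel relocate the mapping; with
	 * CONFIG_HAVE_MOVE_PMD the page tables move one pmd at a time. */
	void *new = mremap(old, len, len, MREMAP_MAYMOVE);
	return new == MAP_FAILED;
}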
@@ -401,6 +401,8 @@ int swap_readpage(struct page *page, bool synchronous)
 	get_task_struct(current);
 	bio->bi_private = current;
 	bio_set_op_attrs(bio, REQ_OP_READ, 0);
+	if (synchronous)
+		bio->bi_opf |= REQ_HIPRI;
 	count_vm_event(PSWPIN);
 	bio_get(bio);
 	qc = submit_bio(bio);
@@ -410,7 +412,7 @@ int swap_readpage(struct page *page, bool synchronous)
 			break;
 
 		if (!blk_poll(disk->queue, qc, true))
-			break;
+			io_schedule();
 	}
 	__set_current_state(TASK_RUNNING);
 	bio_put(bio);
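Taken together, the two page_io.c hunks make synchronous polled swap-in actually work: REQ_HIPRI marks the bio so blk_poll() will look for it, and the wait loop now sleeps via io_schedule() instead of silently bailing out when a poll pass reaps nothing. A sketch of the corrected wait loop, paraphrased from swap_readpage() under the assumption that the surrounding context (not shown in the hunks) matches the kernel of this era:

	bio->bi_opf |= REQ_HIPRI;		/* eligible for completion polling */
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;			/* endio has run and cleared bi_private */
		if (!blk_poll(disk->queue, qc, true))
			io_schedule();		/* nothing reaped: sleep, then retry */
	}
	__set_current_state(TASK_RUNNING);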
@@ -126,7 +126,7 @@ void put_pages_list(struct list_head *pages)
 	while (!list_empty(pages)) {
 		struct page *victim;
 
-		victim = list_entry(pages->prev, struct page, lru);
+		victim = lru_to_page(pages);
 		list_del(&victim->lru);
 		put_page(victim);
 	}
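lru_to_page() is the existing helper for exactly this open-coded pattern; the series converts the remaining callers in fs/ and mm/. Its definition is a one-liner, sketched here from the include/linux/mm.h of the time:

/* Last page on a list threaded through page->lru. */
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))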
@@ -550,7 +550,7 @@ retry:
 			break;
 		}
 		if (unlikely(pmd_none(dst_pmdval)) &&
-		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
+		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
 			err = -ENOMEM;
 			break;
 		}