Merge ad57a1022f ("Merge tag 'exfat-for-5.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/exfat") into android-mainline
Steps on the way to 5.8-rc1.

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4bc42f572167ea2f815688b4d1eb6124b6d260d4
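Of note in this merge is the mm/mempolicy.c part of the 5.8 mmap locking API conversion: open-coded rwsem calls on mm->mmap_sem are replaced with named wrappers, and comments are updated for the field's rename to mmap_lock. As a simplified sketch of include/linux/mmap_lock.h as of 5.8 (the real header also carries trylock, killable, and other variants), the wrappers are thin veneers over the same rwsem:

	/* Simplified sketch of include/linux/mmap_lock.h (5.8). */
	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);	/* same rwsem, new field name */
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}

	static inline void mmap_write_lock(struct mm_struct *mm)
	{
		down_write(&mm->mmap_lock);
	}

	static inline void mmap_write_unlock(struct mm_struct *mm)
	{
		up_write(&mm->mmap_lock);
	}

Behavior is unchanged; the wrappers exist so that the lock's implementation can later be changed without touching every call site.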
@@ -224,7 +224,7 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
  * handle an empty nodemask with MPOL_PREFERRED here.
  *
  * Must be called holding task's alloc_lock to protect task's mems_allowed
- * and mempolicy. May also be called holding the mmap_semaphore for write.
+ * and mempolicy. May also be called holding the mmap_lock for write.
  */
 static int mpol_set_nodemask(struct mempolicy *pol,
 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
@@ -368,7 +368,7 @@ static void mpol_rebind_preferred(struct mempolicy *pol,
 /*
  * mpol_rebind_policy - Migrate a policy to a different set of nodes
  *
- * Per-vma policies are protected by mmap_sem. Allocations using per-task
+ * Per-vma policies are protected by mmap_lock. Allocations using per-task
  * policies are protected by task->mems_allowed_seq to prevent a premature
  * OOM/allocation failure due to parallel nodemask modification.
  */
@@ -398,17 +398,17 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 /*
  * Rebind each vma in mm to new nodemask.
  *
- * Call holding a reference to mm.  Takes mm->mmap_sem during call.
+ * Call holding a reference to mm.  Takes mm->mmap_lock during call.
  */
 
 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 {
 	struct vm_area_struct *vma;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 	for (vma = mm->mmap; vma; vma = vma->vm_next)
 		mpol_rebind_policy(vma->vm_policy, new);
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 }
 
 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
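Reconstructed from the hunk above for readability, the whole of mpol_rebind_mm() reads as follows after the conversion:

	void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
	{
		struct vm_area_struct *vma;

		/* Write-lock the mm: rebinding swaps each VMA's policy. */
		mmap_write_lock(mm);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			mpol_rebind_policy(vma->vm_policy, new);
		mmap_write_unlock(mm);
	}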
@@ -764,7 +764,7 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 /*
  * Apply policy to a single VMA
- * This must be called with the mmap_sem held for writing.
+ * This must be called with the mmap_lock held for writing.
  */
 static int vma_replace_policy(struct vm_area_struct *vma,
 						struct mempolicy *pol)
@@ -789,7 +789,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
 	}
 
 	old = vma->vm_policy;
-	vma->vm_policy = new; /* protected by mmap_sem */
+	vma->vm_policy = new; /* protected by mmap_lock */
 	mpol_put(old);
 
 	return 0;
@@ -933,7 +933,7 @@ static int lookup_node(struct mm_struct *mm, unsigned long addr)
 		put_page(p);
 	}
 	if (locked)
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 	return err;
 }
 
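(The "if (locked)" guard exists because lookup_node() calls get_user_pages_locked(), which may drop the mmap lock itself and clear "locked"; the function only unlocks when the lock is still held.)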
@@ -966,10 +966,10 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 		 * vma/shared policy at addr is NULL.  We
 		 * want to return MPOL_DEFAULT in this case.
 		 */
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 		vma = find_vma_intersection(mm, addr, addr+1);
 		if (!vma) {
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			return -EFAULT;
 		}
 		if (vma->vm_ops && vma->vm_ops->get_policy)
@@ -986,7 +986,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 	if (flags & MPOL_F_ADDR) {
 		/*
 		 * Take a refcount on the mpol, lookup_node()
-		 * wil drop the mmap_sem, so after calling
+		 * wil drop the mmap_lock, so after calling
 		 * lookup_node() only "pol" remains valid, "vma"
 		 * is stale.
 		 */
@@ -1028,7 +1028,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
  out:
 	mpol_cond_put(pol);
 	if (vma)
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 	if (pol_refcount)
 		mpol_put(pol_refcount);
 	return err;
@@ -1137,7 +1137,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	if (err)
 		return err;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	/*
 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
@@ -1218,7 +1218,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 		if (err < 0)
 			break;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (err < 0)
 		return err;
 	return busy;
@@ -1341,12 +1341,12 @@ static long do_mbind(unsigned long start, unsigned long len,
 	{
 		NODEMASK_SCRATCH(scratch);
 		if (scratch) {
-			down_write(&mm->mmap_sem);
+			mmap_write_lock(mm);
 			task_lock(current);
 			err = mpol_set_nodemask(new, nmask, scratch);
 			task_unlock(current);
 			if (err)
-				up_write(&mm->mmap_sem);
+				mmap_write_unlock(mm);
 		} else
 			err = -ENOMEM;
 		NODEMASK_SCRATCH_FREE(scratch);
@@ -1383,7 +1383,7 @@ up_out:
 		putback_movable_pages(&pagelist);
 	}
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 mpol_out:
 	mpol_put(new);
 	return err;
@@ -2186,7 +2186,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
  *
  * This function allocates a page from the kernel page pool and applies
  * a NUMA policy associated with the VMA or the current process.
- * When VMA is not NULL caller must hold down_read on the mmap_sem of the
+ * When VMA is not NULL caller must read-lock the mmap_lock of the
  * mm_struct of the VMA to prevent it from going away. Should be used for
  * all allocations for pages that will be mapped into user space. Returns
  * NULL when no page can be allocated.
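Taken together, every code change in this diff is one of four mechanical, behavior-preserving substitutions (in 5.8 the lock is still the same rwsem underneath):

	down_read(&mm->mmap_sem);   ->  mmap_read_lock(mm);
	up_read(&mm->mmap_sem);     ->  mmap_read_unlock(mm);
	down_write(&mm->mmap_sem);  ->  mmap_write_lock(mm);
	up_write(&mm->mmap_sem);    ->  mmap_write_unlock(mm);

plus the matching comment updates from mmap_sem to mmap_lock.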