mmap locking API: use coccinelle to convert mmap_sem rwsem call sites
This change converts the existing mmap_sem rwsem calls to use the new mmap
locking API instead.

The change is generated using coccinelle with the following rule:

// spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir .

@@
expression mm;
@@
(
-init_rwsem
+mmap_init_lock
|
-down_write
+mmap_write_lock
|
-down_write_killable
+mmap_write_lock_killable
|
-down_write_trylock
+mmap_write_trylock
|
-up_write
+mmap_write_unlock
|
-downgrade_write
+mmap_write_downgrade
|
-down_read
+mmap_read_lock
|
-down_read_killable
+mmap_read_lock_killable
|
-down_read_trylock
+mmap_read_trylock
|
-up_read
+mmap_read_unlock
)
-(&mm->mmap_sem)
+(mm)

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d8ed45c5dc (parent 0adf65f53a), committed by Linus Torvalds
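For context, a minimal sketch of the wrappers that the rule above substitutes in: they were added earlier in this series as thin inline helpers that simply forward to the existing mmap_sem rwsem, so the conversion is purely mechanical. The shape below (only the variants exercised by the hunks in this page) is how I recall include/linux/mmap_lock.h at this point in the series; the exact signatures and any later field rename should be checked against that header rather than taken from this sketch.

/* Sketch of the mmap locking wrappers, assumed to forward to the
 * rwsem embedded in struct mm_struct (see include/linux/mmap_lock.h
 * for the authoritative definitions).
 */
#include <linux/mm_types.h>
#include <linux/rwsem.h>

static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_sem);
}

static inline void mmap_write_lock(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	up_write(&mm->mmap_sem);
}

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}

With these in place, a call site such as down_read(&mm->mmap_sem) becomes mmap_read_lock(mm) with unchanged locking semantics, which is exactly the transformation the sparc hunks below show.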
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -195,7 +195,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	if (!from_user && address >= PAGE_OFFSET)
 		goto bad_area;
@@ -271,7 +271,7 @@ good_area:
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 	/*
@@ -279,7 +279,7 @@ good_area:
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -328,7 +328,7 @@ no_context:
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (from_user) {
 		pagefault_out_of_memory();
 		return;
@@ -336,7 +336,7 @@ out_of_memory:
 	goto no_context;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
 	if (!from_user)
 		goto no_context;
@@ -390,7 +390,7 @@ static void force_user_fault(unsigned long address, int write)
 
 	code = SEGV_MAPERR;
 
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -415,15 +415,15 @@ good_area:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
 }
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -318,7 +318,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if ((regs->tstate & TSTATE_PRIV) &&
 		    !search_exception_tables(regs->tpc)) {
 			insn = get_fault_insn(regs, insn);
@@ -326,7 +326,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 		}
 
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	if (fault_code & FAULT_CODE_BAD_RA)
@@ -458,7 +458,7 @@ good_area:
 			goto retry;
 		}
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -489,7 +489,7 @@ exit_exception:
 	 */
 bad_area:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
@@ -501,7 +501,7 @@ handle_kernel_fault:
 	 */
 out_of_memory:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!(regs->tstate & TSTATE_PRIV)) {
 		pagefault_out_of_memory();
 		goto exit_exception;
@@ -514,7 +514,7 @@ intr_or_no_mm:
 
 do_sigbus:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -366,7 +366,7 @@ static int map_vdso(const struct vdso_image *image,
 	unsigned long text_start, addr = 0;
 	int ret = 0;
 
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
 
 	/*
 	 * First, get an unmapped region: then randomize it, and make sure that
@@ -422,7 +422,7 @@ up_fail:
 	if (ret)
 		current->mm->context.vdso = NULL;
 
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
 	return ret;
 }