mmap locking API: convert mmap_sem comments
Convert comments that reference mmap_sem to reference mmap_lock instead.

[akpm@linux-foundation.org: fix up linux-next leftovers]
[akpm@linux-foundation.org: s/lockaphore/lock/, per Vlastimil]
[akpm@linux-foundation.org: more linux-next fixups, per Michel]

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-13-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
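For reference, the "mmap_lock" name these comments now use refers to the wrapper API introduced earlier in this series (include/linux/mmap_lock.h). Below is a minimal sketch of how callers take that lock, assuming a v5.8-era kernel context (pre maple-tree, so VMAs are still walked via mm->mmap and vm_next); demo_walk_vmas() is a hypothetical function for illustration, not part of this patch:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static void demo_walk_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		;		/* inspect vma under the read-held mmap_lock */
	mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */
}

The wrappers keep call sites lock-implementation-agnostic, which is the point of the rename the comments are catching up with.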
commit c1e8d7c6a7
parent 3e4e28c5a8
committed by Linus Torvalds
 mm/rmap.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -21,7 +21,7 @@
  * Lock ordering in mm:
  *
  * inode->i_mutex	(while writing or truncating, not reading or faulting)
- *   mm->mmap_sem
+ *   mm->mmap_lock
  *     page->flags PG_locked (lock_page)   * (see huegtlbfs below)
  *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  *         mapping->i_mmap_rwsem
@@ -177,7 +177,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
  * to do any locking for the common case of already having
  * an anon_vma.
  *
- * This must be called with the mmap_sem held for reading.
+ * This must be called with the mmap_lock held for reading.
  */
 int __anon_vma_prepare(struct vm_area_struct *vma)
 {
@@ -1444,7 +1444,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			if (!PageTransCompound(page)) {
 				/*
 				 * Holding pte lock, we do *not* need
-				 * mmap_sem here
+				 * mmap_lock here
 				 */
 				mlock_vma_page(page);
 			}
@@ -1817,7 +1817,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 	/*
 	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
 	 * because that depends on page_mapped(); but not all its usages
-	 * are holding mmap_sem. Users without mmap_sem are required to
+	 * are holding mmap_lock. Users without mmap_lock are required to
 	 * take a reference count to prevent the anon_vma disappearing
 	 */
 	anon_vma = page_anon_vma(page);
@@ -1837,7 +1837,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
  *
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
@@ -1889,7 +1889,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
  *
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
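As a usage note, the contract restated in the second hunk ("must be called with the mmap_lock held for reading") is what fault-path callers already satisfy via the anon_vma_prepare() inline wrapper around __anon_vma_prepare(). A minimal sketch, assuming the same v5.8-era kernel context; demo_prepare_anon() is a hypothetical caller, not code from this commit:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/mmap_lock.h>

static int demo_prepare_anon(struct mm_struct *mm, struct vm_area_struct *vma)
{
	int ret;

	mmap_read_lock(mm);		/* mmap_lock held for reading, as the comment requires */
	ret = anon_vma_prepare(vma);	/* calls __anon_vma_prepare() only if vma->anon_vma is NULL */
	mmap_read_unlock(mm);
	return ret;
}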