mmap locking API: convert mmap_sem comments
Convert comments that reference mmap_sem to reference mmap_lock instead.

[akpm@linux-foundation.org: fix up linux-next leftovers]
[akpm@linux-foundation.org: s/lockaphore/lock/, per Vlastimil]
[akpm@linux-foundation.org: more linux-next fixups, per Michel]

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-13-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c1e8d7c6a7
parent 3e4e28c5a8
committed by Linus Torvalds
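For context (not part of the diff): the mmap_lock name refers to the wrapper API introduced earlier in this same series in include/linux/mmap_lock.h. A minimal sketch of how the new wrappers correspond to the old open-coded mmap_sem operations:

#include <linux/mmap_lock.h>

static void mmap_lock_api_example(struct mm_struct *mm)
{
	mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
	mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */

	mmap_write_lock(mm);	/* was: down_write(&mm->mmap_sem) */
	mmap_write_unlock(mm);	/* was: up_write(&mm->mmap_sem) */
}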
@@ -186,7 +186,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
  * disabled. The memory must be pinned and mapped to the hardware when
  * this is called in hqd_load functions, so it should never fault in
  * the first place. This resolves a circular lock dependency involving
- * four locks, including the DQM lock and mmap_sem.
+ * four locks, including the DQM lock and mmap_lock.
  */
 #define read_user_wptr(mmptr, wptr, dst) \
 ({ \
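The comment above relies on the user pointer never faulting. A hedged sketch of that pattern (not the driver's actual read_user_wptr macro; the function name is illustrative): because the memory is pinned and mapped, the access can run with page faults disabled, so it can never need to take mmap_lock.

#include <linux/uaccess.h>
#include <linux/types.h>

static int read_pinned_wptr(u32 __user *wptr, u32 *dst)
{
	int ret = -EFAULT;

	if (access_ok(wptr, sizeof(*wptr))) {
		pagefault_disable();
		/* Cannot sleep or take mmap_lock: a missing page returns
		 * -EFAULT instead of faulting, which is acceptable here
		 * because the memory is pinned while hqd_load runs. */
		ret = __get_user(*dst, wptr);
		pagefault_enable();
	}
	return ret;
}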
@@ -237,7 +237,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 		CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
 	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
 
-	/* read_user_ptr may take the mm->mmap_sem.
+	/* read_user_ptr may take the mm->mmap_lock.
 	 * release srbm_mutex to avoid circular dependency between
 	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
 	 */
@@ -224,7 +224,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 		CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
 	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
 
-	/* read_user_ptr may take the mm->mmap_sem.
+	/* read_user_ptr may take the mm->mmap_lock.
 	 * release srbm_mutex to avoid circular dependency between
 	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
 	 */
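Both kgd_hqd_load hunks above describe the same fix: drop the low-level hardware mutex before any user access that may fault. A hedged sketch of the shape of that fix (hw_mutex and the function name are illustrative, not the driver's identifiers):

#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/types.h>

static int load_wptr(struct mutex *hw_mutex, u32 __user *wptr, u32 *val)
{
	int ret;

	/* Release the hardware mutex first: the user access below may
	 * fault and take mm->mmap_lock, and holding hw_mutex across that
	 * would recreate the circular dependency named in the comment. */
	mutex_unlock(hw_mutex);
	ret = get_user(*val, wptr);	/* may take current->mm->mmap_lock */
	mutex_lock(hw_mutex);
	return ret;
}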
@@ -203,7 +203,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 	mmap_write_lock(mm->mm);
 	mutex_lock(&mm->i915->mm_lock);
 	if (mm->mn == NULL && !err) {
-		/* Protected by mmap_sem (write-lock) */
+		/* Protected by mmap_lock (write-lock) */
 		err = __mmu_notifier_register(&mn->mn, mm->mm);
 		if (!err) {
 			/* Protected by mm_lock */
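The i915 hunk shows why the write side is taken: __mmu_notifier_register() is the variant that expects the caller to already hold mmap_lock for write. A minimal sketch of that contract, reduced to just the locking:

#include <linux/mmap_lock.h>
#include <linux/mmu_notifier.h>

static int register_under_mmap_lock(struct mmu_notifier *mn,
				    struct mm_struct *mm)
{
	int err;

	mmap_write_lock(mm);		/* write side: registration mutates mm state */
	err = __mmu_notifier_register(mn, mm);
	mmap_write_unlock(mm);
	return err;
}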
@@ -522,8 +522,8 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 
 	/* Spawn a worker so that we can acquire the
 	 * user pages without holding our mutex. Access
-	 * to the user pages requires mmap_sem, and we have
-	 * a strict lock ordering of mmap_sem, struct_mutex -
+	 * to the user pages requires mmap_lock, and we have
+	 * a strict lock ordering of mmap_lock, struct_mutex -
 	 * we already hold struct_mutex here and so cannot
 	 * call gup without encountering a lock inversion.
 	 *
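A hedged sketch of the deferral that comment describes: since the documented order is mmap_lock before struct_mutex, and struct_mutex is already held, the page acquisition is punted to a worker that can take mmap_lock from a clean context. The struct and function names are illustrative, and the gup call itself is elided because its signature varies across kernel versions.

#include <linux/mmap_lock.h>
#include <linux/workqueue.h>
#include <linux/sched/mm.h>

struct userptr_work {
	struct work_struct work;
	struct mm_struct *mm;	/* pinned with mmgrab() by the submitter */
};

static void userptr_worker(struct work_struct *work)
{
	struct userptr_work *w = container_of(work, struct userptr_work, work);

	/* No struct_mutex held here, so taking mmap_lock respects the
	 * documented mmap_lock -> struct_mutex ordering. */
	mmap_read_lock(w->mm);
	/* ... get_user_pages_remote() would run here ... */
	mmap_read_unlock(w->mm);
	mmdrop(w->mm);
}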
@@ -3676,7 +3676,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
  * buffered data written by the GPU besides periodic OA metrics.
  *
  * Note we copy the properties from userspace outside of the i915 perf
- * mutex to avoid an awkward lockdep with mmap_sem.
+ * mutex to avoid an awkward lockdep with mmap_lock.
  *
  * Most of the implementation details are handled by
  * i915_perf_open_ioctl_locked() after taking the &perf->lock
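A hedged sketch of the ordering the i915_perf comment describes (perf_lock and struct my_props are illustrative, not the i915 names): user memory is touched before the mutex is taken, so a page fault can take mmap_lock without the mutex ever appearing in the same lockdep chain.

#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct my_props { u64 flags; };

static DEFINE_MUTEX(perf_lock);

static int open_with_props(const void __user *uprops)
{
	struct my_props props;
	int ret;

	/* May fault and take mmap_lock: done before perf_lock so lockdep
	 * never records a perf_lock -> mmap_lock dependency. */
	if (copy_from_user(&props, uprops, sizeof(props)))
		return -EFAULT;

	mutex_lock(&perf_lock);
	ret = 0;	/* ... the locked open path runs here, no user access ... */
	mutex_unlock(&perf_lock);
	return ret;
}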
@@ -58,7 +58,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 		goto out_clear;
 
 	/*
-	 * If possible, avoid waiting for GPU with mmap_sem
+	 * If possible, avoid waiting for GPU with mmap_lock
 	 * held. We only do this if the fault allows retry and this
 	 * is the first attempt.
 	 */
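A hedged sketch of the retry pattern that comment describes (simplified; the real handler also holds a reference on the buffer object across the unlock):

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static vm_fault_t wait_for_gpu_idle(struct vm_fault *vmf)
{
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY &&
	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
		/* Drop mmap_lock before the potentially long GPU wait;
		 * the core mm will re-run the fault afterwards. */
		mmap_read_unlock(vmf->vma->vm_mm);
		/* ... wait for the GPU here, lock-free ... */
		return VM_FAULT_RETRY;
	}

	/* Retry not allowed: wait with mmap_lock still held. */
	/* ... blocking wait ... */
	return 0;
}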
@@ -131,14 +131,14 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 {
 	/*
 	 * Work around locking order reversal in fault / nopfn
-	 * between mmap_sem and bo_reserve: Perform a trylock operation
+	 * between mmap_lock and bo_reserve: Perform a trylock operation
 	 * for reserve, and if it fails, retry the fault after waiting
 	 * for the buffer to become unreserved.
 	 */
 	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
 		/*
 		 * If the fault allows retry and this is the first
-		 * fault attempt, we try to release the mmap_sem
+		 * fault attempt, we try to release the mmap_lock
 		 * before waiting
 		 */
 		if (fault_flag_allow_retry_first(vmf->flags)) {
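A hedged sketch of the whole workaround (simplified from the hunk above; error handling and object references omitted): trylock the reservation, and on contention either retry the fault without mmap_lock or, if a first-attempt retry is not available, fall back to a blocking lock with mmap_lock held.

#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mmap_lock.h>

static vm_fault_t reserve_for_fault(struct vm_fault *vmf, struct dma_resv *resv)
{
	if (likely(dma_resv_trylock(resv)))
		return 0;	/* reserved without ever sleeping under mmap_lock */

	if (fault_flag_allow_retry_first(vmf->flags) &&
	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
		mmap_read_unlock(vmf->vma->vm_mm);
		/* ... sleep until the buffer becomes unreserved ... */
		return VM_FAULT_RETRY;	/* fault is re-run from scratch */
	}

	/* First-attempt retry not available: block with mmap_lock held. */
	if (dma_resv_lock_interruptible(resv, NULL))
		return VM_FAULT_NOPAGE;
	return 0;
}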