mm/hmm: clean up some coding style and comments
There are no functional changes, just some coding style clean ups and
minor comment changes.

Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 085ea25064
parent 2076e5c045
committed by Jason Gunthorpe

 mm/hmm.c | 62
@@ -153,9 +153,8 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 
 	/* Wake-up everyone waiting on any range. */
 	mutex_lock(&hmm->lock);
-	list_for_each_entry(range, &hmm->ranges, list) {
+	list_for_each_entry(range, &hmm->ranges, list)
 		range->valid = false;
-	}
 	wake_up_all(&hmm->wq);
 	mutex_unlock(&hmm->lock);
 
@@ -166,9 +165,10 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 		list_del_init(&mirror->list);
 		if (mirror->ops->release) {
 			/*
-			 * Drop mirrors_sem so callback can wait on any pending
-			 * work that might itself trigger mmu_notifier callback
-			 * and thus would deadlock with us.
+			 * Drop mirrors_sem so the release callback can wait
+			 * on any pending work that might itself trigger a
+			 * mmu_notifier callback and thus would deadlock with
+			 * us.
 			 */
 			up_write(&hmm->mirrors_sem);
 			mirror->ops->release(mirror);
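The rewritten comment is the whole point of this hunk: never hold a lock across a callback that may block on work which itself needs that lock. For readers outside the kernel tree, here is a minimal userspace analogue of the pattern — a sketch using pthreads rather than the kernel's rw_semaphore, with a hypothetical mirror struct that is not the kernel's:

#include <pthread.h>
#include <stddef.h>

/* Hypothetical stand-in for struct hmm_mirror and its ops. */
struct mirror {
        struct mirror *next;
        void (*release)(struct mirror *m);
};

static pthread_rwlock_t mirrors_sem = PTHREAD_RWLOCK_INITIALIZER;
static struct mirror *mirrors;

static void release_all_mirrors(void)
{
        struct mirror *m;

        pthread_rwlock_wrlock(&mirrors_sem);
        while ((m = mirrors) != NULL) {
                mirrors = m->next;
                /*
                 * Drop mirrors_sem before the callback: release() may wait
                 * on work that itself takes mirrors_sem, and holding it
                 * across the call would deadlock.
                 */
                pthread_rwlock_unlock(&mirrors_sem);
                if (m->release)
                        m->release(m);
                pthread_rwlock_wrlock(&mirrors_sem);
        }
        pthread_rwlock_unlock(&mirrors_sem);
}

The re-lock before the next iteration mirrors what hmm_release() itself does: the mirror list may only be walked while the semaphore is held.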
@@ -223,11 +223,8 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 		int ret;
 
 		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
-		if (!update.blockable && ret == -EAGAIN) {
-			up_read(&hmm->mirrors_sem);
-			ret = -EAGAIN;
-			goto out;
-		}
+		if (!update.blockable && ret == -EAGAIN)
+			break;
 	}
 	up_read(&hmm->mirrors_sem);
 
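This hunk is a pure simplification once you notice the loop is immediately followed by up_read(): the early unlock, the redundant ret = -EAGAIN (ret already equals -EAGAIN on that path), and the goto all collapse into a break that falls through to the single unlock site. The shape of the refactor as a standalone sketch — sync_one() and the pthread locking are stand-ins, not the kernel code:

#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical per-item operation that may fail with -EAGAIN. */
extern int sync_one(int item);

int sync_all(const int *items, int n)
{
        int i, ret = 0;

        pthread_rwlock_rdlock(&sem);
        for (i = 0; i < n; i++) {
                ret = sync_one(items[i]);
                if (ret == -EAGAIN)
                        break;  /* fall through to the single unlock below */
        }
        pthread_rwlock_unlock(&sem);
        return ret;
}

Keeping one unlock site is the main win: a later edit to the loop body cannot forget the unlock on a new early-exit path.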
@@ -271,6 +268,7 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
  *
  * @mirror: new mirror struct to register
  * @mm: mm to register against
+ * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
  *
  * To start mirroring a process address space, the device driver must register
  * an HMM mirror struct.
@@ -298,7 +296,7 @@ EXPORT_SYMBOL(hmm_mirror_register);
 /*
  * hmm_mirror_unregister() - unregister a mirror
  *
- * @mirror: new mirror struct to register
+ * @mirror: mirror struct to unregister
  *
  * Stop mirroring a process address space, and cleanup.
  */
@@ -372,7 +370,7 @@ static int hmm_pfns_bad(unsigned long addr,
  * @fault: should we fault or not ?
  * @write_fault: write fault ?
  * @walk: mm_walk structure
- * Returns: 0 on success, -EBUSY after page fault, or page fault error
+ * Return: 0 on success, -EBUSY after page fault, or page fault error
  *
  * This function will be called whenever pmd_none() or pte_none() returns true,
  * or whenever there is no page directory covering the virtual address range.
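The Returns: to Return: renames in this and the following hunks track the kernel-doc convention: scripts/kernel-doc treats "Return:" as the keyword that opens the return-value section, so only that spelling is rendered as a proper section in the generated documentation. The canonical shape — foo_frob() is a made-up example, not from this file:

/**
 * foo_frob() - frobnicate a foo
 * @foo: the foo to frobnicate
 *
 * Longer description of what frobnicating does.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int foo_frob(struct foo *foo);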
@@ -911,6 +909,7 @@ int hmm_range_register(struct hmm_range *range,
 		      unsigned page_shift)
 {
 	unsigned long mask = ((1UL << page_shift) - 1UL);
+	struct hmm *hmm;
 
 	range->valid = false;
 	range->hmm = NULL;
@@ -924,28 +923,29 @@ int hmm_range_register(struct hmm_range *range,
 	range->start = start;
 	range->end = end;
 
-	range->hmm = hmm_get_or_create(mm);
-	if (!range->hmm)
+	hmm = hmm_get_or_create(mm);
+	if (!hmm)
 		return -EFAULT;
 
 	/* Check if hmm_mm_destroy() was call. */
-	if (range->hmm->mm == NULL || range->hmm->dead) {
-		hmm_put(range->hmm);
+	if (hmm->mm == NULL || hmm->dead) {
+		hmm_put(hmm);
 		return -EFAULT;
 	}
 
-	/* Initialize range to track CPU page table update */
-	mutex_lock(&range->hmm->lock);
+	/* Initialize range to track CPU page table updates. */
+	mutex_lock(&hmm->lock);
 
-	list_add_rcu(&range->list, &range->hmm->ranges);
+	range->hmm = hmm;
+	list_add_rcu(&range->list, &hmm->ranges);
 
 	/*
 	 * If there are any concurrent notifiers we have to wait for them for
 	 * the range to be valid (see hmm_range_wait_until_valid()).
 	 */
-	if (!range->hmm->notifiers)
+	if (!hmm->notifiers)
 		range->valid = true;
-	mutex_unlock(&range->hmm->lock);
+	mutex_unlock(&hmm->lock);
 
 	return 0;
 }
@@ -960,17 +960,19 @@ EXPORT_SYMBOL(hmm_range_register);
  */
 void hmm_range_unregister(struct hmm_range *range)
 {
+	struct hmm *hmm = range->hmm;
+
 	/* Sanity check this really should not happen. */
-	if (range->hmm == NULL || range->end <= range->start)
+	if (hmm == NULL || range->end <= range->start)
 		return;
 
-	mutex_lock(&range->hmm->lock);
+	mutex_lock(&hmm->lock);
 	list_del_rcu(&range->list);
-	mutex_unlock(&range->hmm->lock);
+	mutex_unlock(&hmm->lock);
 
 	/* Drop reference taken by hmm_range_register() */
 	range->valid = false;
-	hmm_put(range->hmm);
+	hmm_put(hmm);
+	range->hmm = NULL;
 }
 EXPORT_SYMBOL(hmm_range_unregister);
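Register and unregister bracket every snapshot or fault operation, which is why the reference handling above matters. A hedged sketch of that lifecycle using the functions named in this file — field setup for struct hmm_range is elided, and HMM_RANGE_DEFAULT_TIMEOUT plus the -EBUSY-on-timeout policy are assumptions about the surrounding tree, not part of this commit:

#include <linux/hmm.h>

static long driver_snapshot_range(struct hmm_range *range, struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
{
        long ret;

        ret = hmm_range_register(range, mm, start, end, PAGE_SHIFT);
        if (ret)
                return ret;

        /* Wait out concurrent notifiers (see hmm_range_wait_until_valid()). */
        if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
                hmm_range_unregister(range);
                return -EBUSY;  /* assumed policy; retrying is also common */
        }

        ret = hmm_range_snapshot(range);  /* fills range->pfns[]; mmap_sem must be held */

        hmm_range_unregister(range);
        return ret;
}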
@@ -978,7 +980,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
 /*
  * hmm_range_snapshot() - snapshot CPU page table for a range
  * @range: range
- * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
+ * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
  *          permission (for instance asking for write and range is read only),
  *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
  *          vma or it is illegal to access that range), number of valid pages
@@ -1061,7 +1063,7 @@ EXPORT_SYMBOL(hmm_range_snapshot);
  * hmm_range_fault() - try to fault some address in a virtual address range
  * @range: range being faulted
  * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
- * Returns: number of valid pages in range->pfns[] (from range start
+ * Return: number of valid pages in range->pfns[] (from range start
  *          address). This may be zero. If the return value is negative,
  *          then one of the following values may be returned:
  *
@@ -1179,7 +1181,7 @@ EXPORT_SYMBOL(hmm_range_fault);
  * @device: device against to dma map page to
  * @daddrs: dma address of mapped pages
  * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
- * Returns: number of pages mapped on success, -EAGAIN if mmap_sem have been
+ * Return: number of pages mapped on success, -EAGAIN if mmap_sem have been
  *          drop and you need to try again, some other error value otherwise
  *
  * Note same usage pattern as hmm_range_fault().
@@ -1267,7 +1269,7 @@ EXPORT_SYMBOL(hmm_range_dma_map);
  * @device: device against which dma map was done
  * @daddrs: dma address of mapped pages
  * @dirty: dirty page if it had the write flag set
- * Returns: number of page unmapped on success, -EINVAL otherwise
+ * Return: number of page unmapped on success, -EINVAL otherwise
  *
  * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
  * to the sync_cpu_device_pagetables() callback so that it is safe here to
@@ -1390,7 +1392,7 @@ static void hmm_devmem_free(struct page *page, void *data)
  * @ops: memory event device driver callback (see struct hmm_devmem_ops)
  * @device: device struct to bind the resource too
  * @size: size in bytes of the device memory to add
- * Returns: pointer to new hmm_devmem struct ERR_PTR otherwise
+ * Return: pointer to new hmm_devmem struct ERR_PTR otherwise
  *
  * This function first finds an empty range of physical address big enough to
  * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which