powerpc/iommu: Stop using @current in mm_iommu_xxx
This changes mm_iommu_xxx helpers to take mm_struct as a parameter instead of getting it from @current, which in some situations may not have a valid reference to mm.

This changes the helpers to receive @mm and moves all references to @current to the callers, including the checks for !current and !current->mm; the checks in mm_iommu_preregistered() are removed entirely, as there is no caller yet. The mm_iommu_adjust_locked_vm() call is moved out of mm_iommu_release() to its caller, since mm_iommu_release() receives only a mm_iommu_table_group_mem_t but the accounting needs the mm.

This should cause no behavioral change.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit d7baee6901
parent 88f54a3581
committed by Michael Ellerman
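
The net effect is easiest to see at a call site: the !current / !current->mm guards move out of the helpers and into the callers, which pass an explicit mm_struct. A minimal before/after sketch of a hypothetical caller (illustrative only, not part of this patch):

    /* Before: the helper looked up the mm via @current internally. */
    ret = mm_iommu_get(ua, entries, &mem);

    /* After: the caller validates and passes the mm explicitly. */
    if (!current || !current->mm)
            return -ESRCH;  /* process exited */
    ret = mm_iommu_get(current->mm, ua, entries, &mem);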
@@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
 	}
 
 	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-			current->pid,
+			current ? current->pid : 0,
 			incr ? '+' : '-',
 			npages << PAGE_SHIFT,
 			mm->locked_vm << PAGE_SHIFT,
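
With @mm now supplied by the caller, mm_iommu_adjust_locked_vm() may legitimately run when @current is NULL (for example from a context that only holds a reference to the mm), so the diagnostic PID read is guarded. With purely illustrative values (a 64K-page kernel, 16 pages pinned, a 64MB memlock limit), the trace line would read something like:

    [1234] RLIMIT_MEMLOCK HASH64 +1048576 1048576/67108864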
@@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
 	return ret;
 }
 
-bool mm_iommu_preregistered(void)
+bool mm_iommu_preregistered(struct mm_struct *mm)
 {
-	if (!current || !current->mm)
-		return false;
-
-	return !list_empty(&current->mm->context.iommu_group_mem_list);
+	return !list_empty(&mm->context.iommu_group_mem_list);
 }
 EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
 
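
mm_iommu_preregistered() loses its guards entirely because no in-tree caller exists yet; once one is added, it is expected to look something like this hypothetical sketch, with the validity check at the call site:

    bool preregistered = false;

    /* Hypothetical caller: the !current / !current->mm checks live here now. */
    if (current && current->mm)
            preregistered = mm_iommu_preregistered(current->mm);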
@@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page)
 	return 0;
 }
 
-long mm_iommu_get(unsigned long ua, unsigned long entries,
+long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, j, ret = 0, locked_entries = 0;
 	struct page *page = NULL;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	mutex_lock(&mem_list_mutex);
 
-	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
 			next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			++mem->used;
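
Because mm_iommu_get() no longer validates @current, a caller that caches the mm for later use must hold its own reference so the mm_struct cannot vanish underneath it. A hypothetical sketch using the reference-counting idiom of this kernel generation (the surrounding code is illustrative):

    struct mm_struct *mm = current->mm;

    if (!mm)
            return -ESRCH;
    atomic_inc(&mm->mm_count);      /* keep the mm_struct itself alive */

    ret = mm_iommu_get(mm, ua, entries, &mem);
    ...
    mmdrop(mm);                     /* release the reference when done */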
@@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
 
 	}
 
-	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
+	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 	if (ret)
 		goto unlock_exit;
 
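
For context, mm_iommu_adjust_locked_vm() charges the pinned pages against RLIMIT_MEMLOCK for the given mm. A condensed sketch of its increment path, reconstructed from the surrounding code rather than quoted from this hunk:

    down_write(&mm->mmap_sem);
    if (incr) {
            unsigned long locked = mm->locked_vm + npages;
            unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

            if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                    ret = -ENOMEM;
            else
                    mm->locked_vm += npages;
    }
    up_write(&mm->mmap_sem);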
@@ -215,11 +209,11 @@ populate:
 	mem->entries = entries;
 	*pmem = mem;
 
-	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 unlock_exit:
 	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
+		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
 
 	mutex_unlock(&mem_list_mutex);
 
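
The unlock_exit path relies on locked_entries being non-zero only after the accounting succeeded, so a failure later in mm_iommu_get() undoes the charge exactly once. Condensed control flow (a sketch, not the full function):

    ret = mm_iommu_adjust_locked_vm(mm, entries, true);
    if (ret)
            goto unlock_exit;       /* nothing was charged, nothing to undo */
    locked_entries = entries;
    ...
    unlock_exit:
            if (locked_entries && ret)      /* charged, but a later step failed */
                    mm_iommu_adjust_locked_vm(mm, locked_entries, false);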
@@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head)
 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 {
 	list_del_rcu(&mem->next);
-	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
 	call_rcu(&mem->rcu, mm_iommu_free);
 }
 
-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	mutex_lock(&mem_list_mutex);
 
 	if (mem->used == 0) {
@@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
 	/* @mapped became 0 so now mappings are disabled, release the region */
 	mm_iommu_release(mem);
 
+	mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
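
mm_iommu_release() only receives the region descriptor, not the mm, so the undo-accounting call moves up into mm_iommu_put(), where @mm is available from the caller. The resulting teardown order, condensed:

    mm_iommu_release(mem);                              /* list_del_rcu() + call_rcu() */
    mm_iommu_adjust_locked_vm(mm, mem->entries, false); /* undo the locked_vm charge */
    mutex_unlock(&mem_list_mutex);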
@@ -304,14 +296,12 @@ unlock_exit:
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size)
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua <= ua) &&
 		    (ua + size <= mem->ua +
 		     (mem->entries << PAGE_SHIFT))) {
@@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
 }
 EXPORT_SYMBOL_GPL(mm_iommu_lookup);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries)
+struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			ret = mem;
 			break;
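
Both lookup helpers now walk the list of the mm they are handed. A hypothetical use of the new mm_iommu_lookup() signature at a call site that resolves a preregistered userspace address (mm, ua and the size are illustrative):

    struct mm_iommu_table_group_mem_t *mem;

    mem = mm_iommu_lookup(mm, ua, PAGE_SIZE);
    if (!mem)
            return -ENXIO;  /* range was not preregistered */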