powerpc/iommu: Stop using @current in mm_iommu_xxx

This changes the mm_iommu_xxx helpers to take an mm_struct as a parameter
instead of getting it from @current, which in some situations may
not have a valid reference to an mm.

This changes the helpers to receive @mm and moves all references to @current
to the callers, including the checks for !current and !current->mm;
the checks in mm_iommu_preregistered() are simply removed, as it has no
caller yet to move them to.

This moves the mm_iommu_adjust_locked_vm() call out of mm_iommu_release()
and into its caller, mm_iommu_put(): mm_iommu_release() only receives an
mm_iommu_table_group_mem_t, while mm_iommu_adjust_locked_vm() needs the mm.

This should cause no behavioral change.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Author:    Alexey Kardashevskiy <aik@ozlabs.ru>, 2016-11-30 17:52:00 +11:00
Committer: Michael Ellerman <mpe@ellerman.id.au>
Commit:    d7baee6901 (parent 88f54a3581)

3 changed files with 36 additions and 40 deletions
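For reference, a minimal caller-side sketch of the reworked interface (not part of the patch): a caller running in process context now performs the !current / !current->mm check itself and passes the mm explicitly, mirroring the VFIO SPAPR TCE hunks below. The wrapper function name here is hypothetical.

```c
/* Hypothetical wrapper, mirroring tce_iommu_register/unregister_pages() below. */
static long example_prereg_cycle(unsigned long vaddr, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	long ret;

	if (!current || !current->mm)
		return -ESRCH;	/* process exited */

	/* The mm is now passed explicitly instead of being taken from @current. */
	ret = mm_iommu_get(current->mm, vaddr, size >> PAGE_SHIFT, &mem);
	if (ret)
		return ret;

	/* ... use the preregistered region ... */

	return mm_iommu_put(current->mm, mem);
}
```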

--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h

@@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm);
 struct mm_iommu_table_group_mem_t;
 extern int isolate_lru_page(struct page *page);	/* from internal.h */
-extern bool mm_iommu_preregistered(void);
-extern long mm_iommu_get(unsigned long ua, unsigned long entries,
+extern bool mm_iommu_preregistered(struct mm_struct *mm);
+extern long mm_iommu_get(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem);
-extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
+extern long mm_iommu_put(struct mm_struct *mm,
+		struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_init(struct mm_struct *mm);
 extern void mm_iommu_cleanup(struct mm_struct *mm);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);

--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c

@@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
 	}
 	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-			current->pid,
+			current ? current->pid : 0,
 			incr ? '+' : '-',
 			npages << PAGE_SHIFT,
 			mm->locked_vm << PAGE_SHIFT,
@@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
 	return ret;
 }
-bool mm_iommu_preregistered(void)
+bool mm_iommu_preregistered(struct mm_struct *mm)
 {
-	if (!current || !current->mm)
-		return false;
-	return !list_empty(&current->mm->context.iommu_group_mem_list);
+	return !list_empty(&mm->context.iommu_group_mem_list);
 }
 EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
@@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page)
 	return 0;
 }
-long mm_iommu_get(unsigned long ua, unsigned long entries,
+long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, j, ret = 0, locked_entries = 0;
 	struct page *page = NULL;
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
 	mutex_lock(&mem_list_mutex);
-	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
 			next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			++mem->used;
@@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
 	}
-	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
+	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 	if (ret)
 		goto unlock_exit;
@@ -215,11 +209,11 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
 	mem->entries = entries;
 	*pmem = mem;
-	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 unlock_exit:
 	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
+		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
 	mutex_unlock(&mem_list_mutex);
@@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head)
 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 {
 	list_del_rcu(&mem->next);
-	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
 	call_rcu(&mem->rcu, mm_iommu_free);
 }
-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
 	mutex_lock(&mem_list_mutex);
 	if (mem->used == 0) {
@@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
 	/* @mapped became 0 so now mappings are disabled, release the region */
 	mm_iommu_release(mem);
+	mm_iommu_adjust_locked_vm(mm, mem->entries, false);
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
@@ -304,14 +296,12 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size)
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua <= ua) &&
 		    (ua + size <= mem->ua +
 		     (mem->entries << PAGE_SHIFT))) {
@@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
 }
 EXPORT_SYMBOL_GPL(mm_iommu_lookup);
-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries)
+struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			ret = mem;
 			break;
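The locked_vm accounting move is easiest to read as a whole: mm_iommu_release() only receives the mm_iommu_table_group_mem_t, so the mm_iommu_adjust_locked_vm() call now lives in mm_iommu_put(), which has the @mm. A condensed sketch of the resulting mm_iommu_put(), based on the hunks above (the unchanged mem->used and @mapped handling between lock and unlock is elided):

```c
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	/* ... unchanged mem->used / @mapped checks, may goto unlock_exit ... */

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);				/* needs only @mem */
	mm_iommu_adjust_locked_vm(mm, mem->entries, false);	/* needs @mm */

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
```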

--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c

@@ -107,14 +107,17 @@ static long tce_iommu_unregister_pages(struct tce_container *container,
 {
 	struct mm_iommu_table_group_mem_t *mem;
+	if (!current || !current->mm)
+		return -ESRCH; /* process exited */
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
 		return -EINVAL;
-	mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
+	mem = mm_iommu_find(current->mm, vaddr, size >> PAGE_SHIFT);
 	if (!mem)
 		return -ENOENT;
-	return mm_iommu_put(mem);
+	return mm_iommu_put(current->mm, mem);
 }
 static long tce_iommu_register_pages(struct tce_container *container,
@@ -124,11 +127,14 @@ static long tce_iommu_register_pages(struct tce_container *container,
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	unsigned long entries = size >> PAGE_SHIFT;
+	if (!current || !current->mm)
+		return -ESRCH; /* process exited */
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
 			((vaddr + size) < vaddr))
 		return -EINVAL;
-	ret = mm_iommu_get(vaddr, entries, &mem);
+	ret = mm_iommu_get(current->mm, vaddr, entries, &mem);
 	if (ret)
 		return ret;
@@ -375,7 +381,7 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
-	mem = mm_iommu_lookup(tce, size);
+	mem = mm_iommu_lookup(current->mm, tce, size);
 	if (!mem)
 		return -EINVAL;