aio: don't bother with unmapping when aio_free_ring() is coming from exit_aio()

... since exit_mmap() is coming and it will munmap() everything anyway.
In all other cases aio_free_ring() has ctx->mm == current->mm; moreover,
all other callers of vm_munmap() have mm == current->mm, so this will
allow us to get rid of the mm argument of vm_munmap().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit 936af1576e
parent 6be5ceb02e
Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   2012-04-20 21:49:41 -04:00

 1 file changed, 14 insertions(+), 1 deletion(-)
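
The last sentence of the message is the interesting part: once exit_aio()
stops unmapping, every remaining vm_munmap() caller passes current->mm, so
the parameter carries no information. A minimal sketch of what dropping it
might look like, assuming the 2012-era do_munmap() and mm->mmap_sem
interfaces (an illustration, not the actual follow-up patch):

int vm_munmap(unsigned long start, size_t len)
{
	struct mm_struct *mm = current->mm;	/* the only mm callers ever pass now */
	int ret;

	down_write(&mm->mmap_sem);		/* writers of the address space serialize here */
	ret = do_munmap(mm, start, len);	/* unmap [start, start + len) */
	up_write(&mm->mmap_sem);
	return ret;
}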

--- a/fs/aio.c
+++ b/fs/aio.c
@@ -92,8 +92,10 @@ static void aio_free_ring(struct kioctx *ctx)
 	for (i=0; i<info->nr_pages; i++)
 		put_page(info->ring_pages[i]);
 
-	if (info->mmap_size)
+	if (info->mmap_size) {
+		BUG_ON(ctx->mm != current->mm);
 		vm_munmap(ctx->mm, info->mmap_base, info->mmap_size);
+	}
 
 	if (info->ring_pages && info->ring_pages != info->internal_pages)
 		kfree(info->ring_pages);
@@ -386,6 +388,17 @@ void exit_aio(struct mm_struct *mm)
 			       "exit_aio:ioctx still alive: %d %d %d\n",
 			       atomic_read(&ctx->users), ctx->dead,
 			       ctx->reqs_active);
+		/*
+		 * We don't need to bother with munmap() here -
+		 * exit_mmap(mm) is coming and it'll unmap everything.
+		 * Since aio_free_ring() uses non-zero ->mmap_size
+		 * as indicator that it needs to unmap the area,
+		 * just set it to 0; aio_free_ring() is the only
+		 * place that uses ->mmap_size, so it's safe.
+		 * That way we get all munmap done to current->mm -
+		 * all other callers have ctx->mm == current->mm.
+		 */
+		ctx->ring_info.mmap_size = 0;
 		put_ioctx(ctx);
 	}
 }
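
The ->mmap_size trick above is easy to model outside the kernel. Below is a
small standalone C sketch (hypothetical names; not kernel code) showing how a
size field that doubles as a needs-unmapping flag lets an exit path opt out of
the unmap simply by zeroing it:

#include <stdio.h>

/* Hypothetical stand-in for struct aio_ring_info's mapping fields. */
struct ring_info {
	unsigned long mmap_base;
	unsigned long mmap_size;	/* 0 means: nothing to unmap */
};

/* Stand-in for vm_munmap(); just reports what would be unmapped. */
static void fake_munmap(unsigned long base, unsigned long size)
{
	printf("munmap base=%#lx size=%lu\n", base, size);
}

/* Mirrors aio_free_ring()'s logic: unmap only when mmap_size != 0. */
static void free_ring(struct ring_info *info)
{
	if (info->mmap_size)
		fake_munmap(info->mmap_base, info->mmap_size);
}

int main(void)
{
	struct ring_info normal = { 0x100000, 4096 };
	struct ring_info exiting = { 0x200000, 4096 };

	free_ring(&normal);		/* normal teardown: unmaps */

	exiting.mmap_size = 0;		/* what exit_aio() now does */
	free_ring(&exiting);		/* exit path: skips the unmap */
	return 0;
}

Overloading the size as the flag only works because aio_free_ring() is the
sole reader of ->mmap_size, which is exactly the invariant the in-tree
comment calls out.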