io_uring: hold mmap_sem for mm->locked_vm manipulation

The kernel doesn't seem to have clear rules around this, but various
spots use the mmap_sem to serialize modifications of the locked_vm
count. Play it safe and lock the mm for write when accounting or
unaccounting locked memory.
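
As a minimal sketch of the pattern being applied (the helper name and
calling context below are illustrative only, not part of this patch),
the idea is to take the mmap lock for write around any adjustment of
mm->locked_vm:

	#include <linux/mm_types.h>
	#include <linux/mmap_lock.h>

	/* Illustrative helper: adjust mm->locked_vm under the mmap lock. */
	static void example_account_locked_vm(struct mm_struct *mm,
					      unsigned long nr_pages)
	{
		mmap_write_lock(mm);	/* exclude concurrent locked_vm updates */
		mm->locked_vm += nr_pages;
		mmap_write_unlock(mm);
	}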

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe
Date:   2020-12-17 07:53:33 -07:00
Parent: a146468d76
Commit: 4bc4a91253
1 changed file with 10 additions and 4 deletions

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8157,10 +8157,13 @@ static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
 		__io_unaccount_mem(ctx->user, nr_pages);
 
 	if (ctx->mm_account) {
-		if (acct == ACCT_LOCKED)
+		if (acct == ACCT_LOCKED) {
+			mmap_write_lock(ctx->mm_account);
 			ctx->mm_account->locked_vm -= nr_pages;
-		else if (acct == ACCT_PINNED)
+			mmap_write_unlock(ctx->mm_account);
+		}else if (acct == ACCT_PINNED) {
 			atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
+		}
 	}
 }
 
@@ -8176,10 +8179,13 @@ static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
 	}
 
 	if (ctx->mm_account) {
-		if (acct == ACCT_LOCKED)
+		if (acct == ACCT_LOCKED) {
+			mmap_write_lock(ctx->mm_account);
 			ctx->mm_account->locked_vm += nr_pages;
-		else if (acct == ACCT_PINNED)
+			mmap_write_unlock(ctx->mm_account);
+		} else if (acct == ACCT_PINNED) {
 			atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
+		}
 	}
 
 	return 0;