io_uring/kbuf: get rid of lower BGID lists

commit 09ab7eff38 upstream.

Just rely on the xarray for any kind of bgid. This simplifies things, and
the special-cased list for lower bgids really doesn't bring us much, if anything.

Cc: stable@vger.kernel.org # v6.4+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Authored by Jens Axboe on 2024-03-14 10:45:07 -06:00; committed by Greg Kroah-Hartman
parent 310203fd7c
commit e08987e2e3
3 changed files with 8 additions and 65 deletions
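
In short, where lookups previously had to check a fixed 64-entry array (BGID_ARRAY) for low
bgids before falling back to the xarray, every buffer group ID now resolves through the xarray
alone. A minimal sketch of the resulting helpers, condensed from the io_uring/kbuf.c hunks
below (surrounding locking and callers elided):

	/* Sketch of the post-patch helpers: one xarray, any bgid. */
	static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
								   unsigned int bgid)
	{
		/* Low and high bgids alike are looked up in the xarray now. */
		return xa_load(&ctx->io_bl_xa, bgid);
	}

	static int io_buffer_add_list(struct io_ring_ctx *ctx,
				      struct io_buffer_list *bl, unsigned int bgid)
	{
		bl->bgid = bgid;
		smp_store_release(&bl->is_ready, 1);
		/* No BGID_ARRAY special case: store unconditionally. */
		return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
	}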

include/linux/io_uring_types.h

@@ -281,7 +281,6 @@ struct io_ring_ctx {
 	struct io_submit_state	submit_state;
-	struct io_buffer_list	*io_bl;
 	struct xarray		io_bl_xa;
 	struct io_hash_table	cancel_table_locked;

io_uring/io_uring.c

@@ -353,7 +353,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 err:
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
 	return NULL;
@@ -2906,7 +2905,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_wq_put_hash(ctx->hash_map);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
 }

io_uring/kbuf.c

@@ -17,8 +17,6 @@
 #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
-#define BGID_ARRAY	64
 /* BIDs are addressed by a 16-bit field in a CQE */
 #define MAX_BIDS_PER_BGID (1 << 16)
@@ -40,13 +38,9 @@ struct io_buf_free {
 	int			inuse;
 };
-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
-						   struct io_buffer_list *bl,
-						   unsigned int bgid)
+static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+							   unsigned int bgid)
 {
-	if (bl && bgid < BGID_ARRAY)
-		return &bl[bgid];
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
@@ -55,7 +49,7 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 {
 	lockdep_assert_held(&ctx->uring_lock);
-	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
+	return __io_buffer_get_list(ctx, bgid);
 }

 static int io_buffer_add_list(struct io_ring_ctx *ctx,
@@ -68,10 +62,6 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 	 */
 	bl->bgid = bgid;
 	smp_store_release(&bl->is_ready, 1);
-
-	if (bgid < BGID_ARRAY)
-		return 0;
-
 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
@@ -217,24 +207,6 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 	return ret;
 }

-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
-{
-	struct io_buffer_list *bl;
-	int i;
-
-	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
-	if (!bl)
-		return -ENOMEM;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		INIT_LIST_HEAD(&bl[i].buf_list);
-		bl[i].bgid = i;
-	}
-
-	smp_store_release(&ctx->io_bl, bl);
-	return 0;
-}
-
 /*
  * Mark the given mapped range as free for reuse
  */
@@ -309,13 +281,6 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 	struct list_head *item, *tmp;
 	struct io_buffer *buf;
 	unsigned long index;
-	int i;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		if (!ctx->io_bl)
-			break;
-		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
-	}

 	xa_for_each(&ctx->io_bl_xa, index, bl) {
 		xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -498,12 +463,6 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 	io_ring_submit_lock(ctx, issue_flags);

-	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
-		ret = io_init_bl_list(ctx);
-		if (ret)
-			goto err;
-	}
-
 	bl = io_buffer_get_list(ctx, p->bgid);
 	if (unlikely(!bl)) {
 		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
@@ -516,14 +475,9 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret) {
 			/*
 			 * Doesn't need rcu free as it was never visible, but
-			 * let's keep it consistent throughout. Also can't
-			 * be a lower indexed array group, as adding one
-			 * where lookup failed cannot happen.
+			 * let's keep it consistent throughout.
 			 */
-			if (p->bgid >= BGID_ARRAY)
-				kfree_rcu(bl, rcu);
-			else
-				WARN_ON_ONCE(1);
+			kfree_rcu(bl, rcu);
 			goto err;
 		}
 	}
@@ -688,12 +642,6 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	if (reg.ring_entries >= 65536)
 		return -EINVAL;

-	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
-		int ret = io_init_bl_list(ctx);
-		if (ret)
-			return ret;
-	}
-
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (bl) {
 		/* if mapped buffer ring OR classic exists, don't allow */
@@ -743,10 +691,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		return -EINVAL;

 	__io_remove_buffers(ctx, bl, -1U);
-	if (bl->bgid >= BGID_ARRAY) {
-		xa_erase(&ctx->io_bl_xa, bl->bgid);
-		kfree_rcu(bl, rcu);
-	}
+	xa_erase(&ctx->io_bl_xa, bl->bgid);
+	kfree_rcu(bl, rcu);
 	return 0;
 }
@@ -780,7 +726,7 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
 {
 	struct io_buffer_list *bl;

-	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
+	bl = __io_buffer_get_list(ctx, bgid);
 	if (!bl || !bl->is_mmap)
 		return NULL;