io_uring: move zc reporting from the hot path

Add custom task-work and notif callbacks on top of the usual ones, with
the extended variants also handling zc usage reporting. That moves the
reporting out of the hot path.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/40de4a6409042478e1f35adc4912e23226cb1b5c.1667557923.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2022-11-04 10:59:45 +00:00 committed by Jens Axboe
parent bedd20bcf3
commit 40725d1b96
3 changed files with 42 additions and 12 deletions

View File

@ -923,6 +923,9 @@ void io_send_zc_cleanup(struct io_kiocb *req)
} }
} }
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
@ -935,11 +938,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (req->flags & REQ_F_CQE_SKIP) if (req->flags & REQ_F_CQE_SKIP)
return -EINVAL; return -EINVAL;
zc->flags = READ_ONCE(sqe->ioprio);
if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
IORING_RECVSEND_FIXED_BUF |
IORING_SEND_ZC_REPORT_USAGE))
return -EINVAL;
notif = zc->notif = io_alloc_notif(ctx); notif = zc->notif = io_alloc_notif(ctx);
if (!notif) if (!notif)
return -ENOMEM; return -ENOMEM;
@ -947,6 +945,17 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
notif->cqe.res = 0; notif->cqe.res = 0;
notif->cqe.flags = IORING_CQE_F_NOTIF; notif->cqe.flags = IORING_CQE_F_NOTIF;
req->flags |= REQ_F_NEED_CLEANUP; req->flags |= REQ_F_NEED_CLEANUP;
zc->flags = READ_ONCE(sqe->ioprio);
if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
if (zc->flags & ~IO_ZC_FLAGS_VALID)
return -EINVAL;
if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
io_notif_set_extended(notif);
io_notif_to_data(notif)->zc_report = true;
}
}
if (zc->flags & IORING_RECVSEND_FIXED_BUF) { if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
unsigned idx = READ_ONCE(sqe->buf_index); unsigned idx = READ_ONCE(sqe->buf_index);
@ -956,9 +965,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->imu = READ_ONCE(ctx->user_bufs[idx]); req->imu = READ_ONCE(ctx->user_bufs[idx]);
io_req_set_rsrc_node(notif, ctx, 0); io_req_set_rsrc_node(notif, ctx, 0);
} }
if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
io_notif_to_data(notif)->zc_report = true;
}
if (req->opcode == IORING_OP_SEND_ZC) { if (req->opcode == IORING_OP_SEND_ZC) {
if (READ_ONCE(sqe->__pad3[0])) if (READ_ONCE(sqe->__pad3[0]))

View File

@ -18,11 +18,17 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
__io_unaccount_mem(ctx->user, nd->account_pages); __io_unaccount_mem(ctx->user, nd->account_pages);
nd->account_pages = 0; nd->account_pages = 0;
} }
io_req_task_complete(notif, locked);
}
static void io_notif_complete_tw_ext(struct io_kiocb *notif, bool *locked)
{
struct io_notif_data *nd = io_notif_to_data(notif);
if (nd->zc_report && (nd->zc_copied || !nd->zc_used)) if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED; notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
io_req_task_complete(notif, locked);
__io_notif_complete_tw(notif, locked);
} }
static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg, static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg,
@ -31,15 +37,33 @@ static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg,
struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg); struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
struct io_kiocb *notif = cmd_to_io_kiocb(nd); struct io_kiocb *notif = cmd_to_io_kiocb(nd);
if (refcount_dec_and_test(&uarg->refcnt))
io_req_task_work_add(notif);
}
static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg,
bool success)
{
struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
if (nd->zc_report) { if (nd->zc_report) {
if (success && !nd->zc_used && skb) if (success && !nd->zc_used && skb)
WRITE_ONCE(nd->zc_used, true); WRITE_ONCE(nd->zc_used, true);
else if (!success && !nd->zc_copied) else if (!success && !nd->zc_copied)
WRITE_ONCE(nd->zc_copied, true); WRITE_ONCE(nd->zc_copied, true);
} }
io_tx_ubuf_callback(skb, uarg, success);
}
if (refcount_dec_and_test(&uarg->refcnt))
	io_req_task_work_add(notif);
void io_notif_set_extended(struct io_kiocb *notif)
{
struct io_notif_data *nd = io_notif_to_data(notif);
nd->zc_report = false;
nd->zc_used = false;
nd->zc_copied = false;
notif->io_task_work.func = io_notif_complete_tw_ext;
io_notif_to_data(notif)->uarg.callback = io_tx_ubuf_callback_ext;
} }
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx) struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
@ -63,7 +87,6 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
nd->account_pages = 0; nd->account_pages = 0;
nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN; nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
nd->uarg.callback = io_tx_ubuf_callback; nd->uarg.callback = io_tx_ubuf_callback;
nd->zc_report = nd->zc_used = nd->zc_copied = false;
refcount_set(&nd->uarg.refcnt, 1); refcount_set(&nd->uarg.refcnt, 1);
return notif; return notif;
} }

View File

@ -19,6 +19,7 @@ struct io_notif_data {
}; };
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx); struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
void io_notif_set_extended(struct io_kiocb *notif);
static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif) static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
{ {