linux-stable/io_uring/uring_cmd.c
Linus Torvalds 83511470af block-6.3-2023-03-24

Merge tag 'block-6.3-2023-03-24' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Christoph:
     - Send Identify with CNS 06h only to I/O controllers (Martin
       George)
     - Fix nvme_tcp_term_pdu to match spec (Caleb Sander)

 - Pass in issue_flags for uring_cmd, so the end_io handlers don't need
   to assume what the right context is (me; see the sketch after the
   shortlog below)

 - Fix for ublk, marking it as LIVE before adding it to avoid races on
   the initial IO (Ming)

* tag 'block-6.3-2023-03-24' of git://git.kernel.dk/linux:
  nvme-tcp: fix nvme_tcp_term_pdu to match spec
  nvme: send Identify with CNS 06h only to I/O controllers
  block/io_uring: pass in issue_flags for uring_cmd task_work handling
  block: ublk_drv: mark device as LIVE before adding disk
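
To illustrate the issue_flags change above: end_io style callbacks are now
told what ring context they actually run under and pass those flags straight
through to the completion, instead of assuming an unlocked ring. A minimal
sketch (my_drv_task_cb is hypothetical; io_uring_cmd_done() is the real API
from the file below):

	static void my_drv_task_cb(struct io_uring_cmd *ioucmd,
				   unsigned issue_flags)
	{
		/* complete with the context flags we were handed */
		io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
	}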


// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
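
/*
 * Illustrative only, not part of this file: a driver whose completion
 * fires in IRQ context cannot post the CQE directly, so it punts to
 * task context first. All my_drv_* names are hypothetical; the nvme
 * passthrough code follows this shape.
 */
#if 0
static void my_drv_task_cb(struct io_uring_cmd *ioucmd, unsigned issue_flags)
{
	/* task context now; safe to post the completion */
	io_uring_cmd_done(ioucmd, my_drv_result(ioucmd), 0, issue_flags);
}

static void my_drv_irq_done(struct io_uring_cmd *ioucmd)
{
	/* IRQ context: defer the final completion to task work */
	io_uring_cmd_complete_in_task(ioucmd, my_drv_task_cb);
}
#endif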

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	else
		io_req_complete_post(req, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
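
/*
 * Illustrative contract, not part of this file: a ->uring_cmd()
 * implementation that queues work returns -EIOCBQUEUED, and its
 * completion path later calls io_uring_cmd_done() (res2 only reaches
 * userspace on IORING_SETUP_CQE32 rings). my_drv_queue() is a
 * hypothetical helper.
 */
#if 0
static int my_drv_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	if (!my_drv_queue(ioucmd))
		return -EIO;		/* completed inline, CQE posted by core */
	return -EIOCBQUEUED;		/* io_uring_cmd_done() comes later */
}
#endif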

int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	size_t cmd_size;

	BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
	BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);

	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);

	memcpy(req->async_data, ioucmd->cmd, cmd_size);
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}

	ioucmd->cmd = sqe->cmd;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}
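
/*
 * For reference, userspace fills the SQE to match the fields read above;
 * a hypothetical liburing-based sketch (MY_CMD_OP and my_payload are made
 * up; sqe->cmd carries 16 bytes, or 80 on IORING_SETUP_SQE128 rings):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fd, NULL, 0, 0);
 *	sqe->cmd_op = MY_CMD_OP;
 *	sqe->uring_cmd_flags = 0;
 *	memcpy(sqe->cmd, &my_payload, sizeof(my_payload));
 */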

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		WRITE_ONCE(ioucmd->cookie, NULL);
	}

	if (req_has_async_data(req))
		ioucmd->cmd = req->async_data;

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}
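
/*
 * Hookup sketch (hypothetical my_drv_* handlers): a file receives these
 * calls through ->uring_cmd, plus ->uring_cmd_iopoll when it is meant to
 * work on IORING_SETUP_IOPOLL rings:
 *
 *	static const struct file_operations my_drv_fops = {
 *		.owner			= THIS_MODULE,
 *		.uring_cmd		= my_drv_uring_cmd,
 *		.uring_cmd_iopoll	= my_drv_uring_cmd_iopoll,
 *	};
 */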

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
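
/*
 * Illustrative use (hypothetical caller): a driver resolving an
 * IORING_URING_CMD_FIXED buffer into an iov_iter before mapping it:
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(user_addr, user_len, WRITE,
 *					&iter, ioucmd);
 *	if (ret < 0)
 *		return ret;
 */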