Merge tag 'nvme-6.8-2024-1-10' of git://git.infradead.org/nvme into for-6.8/block

Pull NVMe changes from Keith:

"nvme follow-up updates for Linux 6.8

 - tcp, fc, and rdma target fixes (Maurizio, Daniel, Hannes, Christoph)
 - discard fixes and improvements (Christoph)
 - timeout debug improvements (Keith, Max)
 - various cleanups (Daniel, Max, Guixin)
 - trace event string fixes (Arnd)
 - shadow doorbell setup on reset fix (William)
 - a write zeroes quirk for SK Hynix (Jim)"

* tag 'nvme-6.8-2024-1-10' of git://git.infradead.org/nvme: (25 commits)
  nvmet-rdma: avoid circular locking dependency on install_queue()
  nvmet-tcp: avoid circular locking dependency on install_queue()
  nvme-pci: set doorbell config before unquiescing
  nvmet-tcp: Fix the H2C expected PDU len calculation
  nvme-tcp: enhance timeout kernel log
  nvme-rdma: enhance timeout kernel log
  nvme-pci: enhance timeout kernel log
  nvme: trace: avoid memcpy overflow warning
  nvmet: re-fix tracing strncpy() warning
  nvme: introduce nvme_disk_is_ns_head helper
  nvme-pci: disable write zeroes for SK Hynix BC901
  nvmet-fcloop: Remove remote port from list when unlinking
  nvmet-trace: avoid dereferencing pointer too early
  nvmet-fc: remove unnecessary bracket
  nvme: simplify the max_discard_segments calculation
  nvme: fix max_discard_sectors calculation
  nvme: also skip discard granularity updates in nvme_config_discard
  nvme: update the explanation for not updating the limits in nvme_config_discard
  nvmet-tcp: fix a missing endianess conversion in nvmet_tcp_try_peek_pdu
  nvme-common: mark nvme_tls_psk_prio static
  ...
Jens Axboe 2024-01-10 15:26:42 -07:00
commit b2da197565
15 changed files with 145 additions and 87 deletions

drivers/nvme/common/keyring.c

@@ -111,7 +111,7 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
* should be preferred to 'generated' PSKs,
* and SHA-384 should be preferred to SHA-256.
*/
struct nvme_tls_psk_priority_list {
static struct nvme_tls_psk_priority_list {
bool generated;
enum nvme_tcp_tls_cipher cipher;
} nvme_tls_psk_prio[] = {

drivers/nvme/host/core.c

@@ -1727,13 +1727,13 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
struct nvme_ns_head *head)
{
struct request_queue *queue = disk->queue;
u32 size = queue_logical_block_size(queue);
u32 max_discard_sectors;
if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX))
ctrl->max_discard_sectors =
nvme_lba_to_sect(head, ctrl->dmrsl);
if (ctrl->max_discard_sectors == 0) {
if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) {
max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl);
} else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
max_discard_sectors = UINT_MAX;
} else {
blk_queue_max_discard_sectors(queue, 0);
return;
}
@@ -1741,14 +1741,22 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
queue->limits.discard_granularity = size;
/* If discard is already enabled, don't reset queue limits */
/*
* If discard is already enabled, don't reset queue limits.
*
* This works around the fact that the block layer can't cope well with
* updating the hardware limits when overridden through sysfs. This is
* harmless because discard limits in NVMe are purely advisory.
*/
if (queue->limits.max_discard_sectors)
return;
blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
blk_queue_max_discard_sectors(queue, max_discard_sectors);
if (ctrl->dmrl)
blk_queue_max_discard_segments(queue, ctrl->dmrl);
else
blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
queue->limits.discard_granularity = queue_logical_block_size(queue);
if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
@@ -2907,14 +2915,6 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
struct nvme_id_ctrl_nvm *id;
int ret;
if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
ctrl->max_discard_sectors = UINT_MAX;
ctrl->max_discard_segments = NVME_DSM_MAX_RANGES;
} else {
ctrl->max_discard_sectors = 0;
ctrl->max_discard_segments = 0;
}
/*
* Even though NVMe spec explicitly states that MDTS is not applicable
* to the write-zeroes, we are cautious and limit the size to the
@@ -2944,8 +2944,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
if (ret)
goto free_data;
if (id->dmrl)
ctrl->max_discard_segments = id->dmrl;
ctrl->dmrl = id->dmrl;
ctrl->dmrsl = le32_to_cpu(id->dmrsl);
if (id->wzsl)
ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);

drivers/nvme/host/nvme.h

@@ -297,14 +297,13 @@ struct nvme_ctrl {
u32 max_hw_sectors;
u32 max_segments;
u32 max_integrity_segments;
u32 max_discard_sectors;
u32 max_discard_segments;
u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
u32 max_zone_append;
#endif
u16 crdt[3];
u16 oncs;
u8 dmrl;
u32 dmrsl;
u16 oacs;
u16 sqsize;
@@ -921,6 +920,10 @@ extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
return disk->fops == &nvme_ns_head_ops;
}
#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
@@ -998,6 +1001,10 @@ static inline void nvme_mpath_start_request(struct request *rq)
static inline void nvme_mpath_end_request(struct request *rq)
{
}
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
{
return false;
}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_revalidate_zones(struct nvme_ns *ns);
@@ -1026,7 +1033,10 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
return dev_to_disk(dev)->private_data;
struct gendisk *disk = dev_to_disk(dev);
WARN_ON(nvme_disk_is_ns_head(disk));
return disk->private_data;
}
#ifdef CONFIG_NVME_HWMON

drivers/nvme/host/pci.c

@@ -1284,6 +1284,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct request *abort_req;
struct nvme_command cmd = { };
u32 csts = readl(dev->bar + NVME_REG_CSTS);
u8 opcode;
/* If PCI error recovery process is happening, we cannot reset or
* the recovery mechanism will surely fail.
@@ -1310,8 +1311,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, completion polled\n",
req->tag, nvmeq->qid);
"I/O tag %d (%04x) QID %d timeout, completion polled\n",
req->tag, nvme_cid(req), nvmeq->qid);
return BLK_EH_DONE;
}
@@ -1327,8 +1328,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
fallthrough;
case NVME_CTRL_DELETING:
dev_warn_ratelimited(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
"I/O tag %d (%04x) QID %d timeout, disable controller\n",
req->tag, nvme_cid(req), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
nvme_dev_disable(dev, true);
return BLK_EH_DONE;
@@ -1343,10 +1344,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* command was already aborted once before and still hasn't been
* returned to the driver, or if this is the admin queue.
*/
opcode = nvme_req(req)->cmd->common.opcode;
if (!nvmeq->qid || iod->aborted) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
req->tag, nvme_cid(req), opcode,
nvme_opcode_str(nvmeq->qid, opcode, 0), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
goto disable;
}
@@ -1362,10 +1365,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
"I/O %d (%s) QID %d timeout, aborting\n",
req->tag,
nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
nvmeq->qid);
"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, aborting req_op:%s(%u) size:%u\n",
req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
blk_rq_bytes(req));
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
BLK_MQ_REQ_NOWAIT);
@@ -2743,10 +2746,10 @@ static void nvme_reset_work(struct work_struct *work)
* controller around but remove all namespaces.
*/
if (dev->online_queues > 1) {
nvme_dbbuf_set(dev);
nvme_unquiesce_io_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
nvme_pci_update_nr_queues(dev);
nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
} else {
dev_warn(dev->ctrl.device, "IO queues lost\n");
@@ -3394,6 +3397,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1c5c, 0x1D59), /* SK Hynix BC901 */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */

drivers/nvme/host/pr.c

@@ -98,7 +98,7 @@ static int nvme_send_pr_command(struct block_device *bdev,
struct nvme_command *c, void *data, unsigned int data_len)
{
if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
bdev->bd_disk->fops == &nvme_ns_head_ops)
nvme_disk_is_ns_head(bdev->bd_disk))
return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,

drivers/nvme/host/rdma.c

@@ -1941,9 +1941,14 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
u8 opcode = req->req.cmd->common.opcode;
u8 fctype = req->req.cmd->fabrics.fctype;
int qid = nvme_rdma_queue_idx(queue);
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
dev_warn(ctrl->ctrl.device,
"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
rq->tag, nvme_cid(rq), opcode,
nvme_opcode_str(qid, opcode, fctype), qid);
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
/*

drivers/nvme/host/sysfs.c

@@ -39,10 +39,9 @@ static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
if (disk->fops == &nvme_bdev_ops)
return nvme_get_ns_from_dev(dev)->head;
else
if (nvme_disk_is_ns_head(disk))
return disk->private_data;
return nvme_get_ns_from_dev(dev)->head;
}
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
@@ -233,7 +232,8 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
}
#ifdef CONFIG_NVME_MULTIPATH
if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
if (dev_to_disk(dev)->fops != &nvme_bdev_ops) /* per-path attr */
/* per-path attr */
if (nvme_disk_is_ns_head(dev_to_disk(dev)))
return 0;
if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
return 0;

drivers/nvme/host/tcp.c

@@ -1922,14 +1922,13 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
ctrl->opts->subsysnqn);
if (!pskid) {
dev_err(ctrl->device, "no valid PSK found\n");
ret = -ENOKEY;
goto out_free_queue;
return -ENOKEY;
}
}
ret = nvme_tcp_alloc_queue(ctrl, 0, pskid);
if (ret)
goto out_free_queue;
return ret;
ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
if (ret)
@@ -2426,9 +2425,9 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
int qid = nvme_tcp_queue_id(req->queue);
dev_warn(ctrl->device,
"queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
opc, nvme_opcode_str(qid, opc, fctype));
"I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
rq->tag, nvme_cid(rq), pdu->hdr.type, opc,
nvme_opcode_str(qid, opc, fctype), qid);
if (ctrl->state != NVME_CTRL_LIVE) {
/*

drivers/nvme/target/fc.c

@@ -1031,7 +1031,7 @@ nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
list_for_each_entry(host, &tgtport->host_list, host_list) {
if (host->hosthandle == hosthandle && !host->invalid) {
if (nvmet_fc_hostport_get(host))
return (host);
return host;
}
}

drivers/nvme/target/fcloop.c

@@ -995,11 +995,6 @@ fcloop_nport_free(struct kref *ref)
{
struct fcloop_nport *nport =
container_of(ref, struct fcloop_nport, ref);
unsigned long flags;
spin_lock_irqsave(&fcloop_lock, flags);
list_del(&nport->nport_list);
spin_unlock_irqrestore(&fcloop_lock, flags);
kfree(nport);
}
@@ -1357,6 +1352,8 @@ __unlink_remote_port(struct fcloop_nport *nport)
nport->tport->remoteport = NULL;
nport->rport = NULL;
list_del(&nport->nport_list);
return rport;
}

drivers/nvme/target/rdma.c

@@ -37,6 +37,8 @@
#define NVMET_RDMA_MAX_MDTS 8
#define NVMET_RDMA_MAX_METADATA_MDTS 5
#define NVMET_RDMA_BACKLOG 128
struct nvmet_rdma_srq;
struct nvmet_rdma_cmd {
@@ -1583,8 +1585,19 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
}
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
flush_workqueue(nvmet_wq);
struct nvmet_rdma_queue *q;
int pending = 0;
/* Check for pending controller teardown */
mutex_lock(&nvmet_rdma_queue_mutex);
list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
q->state == NVMET_RDMA_Q_DISCONNECTING)
pending++;
}
mutex_unlock(&nvmet_rdma_queue_mutex);
if (pending > NVMET_RDMA_BACKLOG)
return NVME_SC_CONNECT_CTRL_BUSY;
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
@@ -1880,7 +1893,7 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
goto out_destroy_id;
}
ret = rdma_listen(cm_id, 128);
ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
if (ret) {
pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;

drivers/nvme/target/tcp.c

@@ -24,6 +24,8 @@
#include "nvmet.h"
#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
#define NVMET_TCP_BACKLOG 128
static int param_store_val(const char *str, int *val, int min, int max)
{
@@ -923,7 +925,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
icresp->hdr.pdo = 0;
icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
icresp->cpda = 0;
if (queue->hdr_digest)
icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -978,13 +980,13 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
unsigned int exp_data_len;
if (likely(queue->nr_cmds)) {
if (unlikely(data->ttag >= queue->nr_cmds)) {
pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
queue->idx, data->ttag, queue->nr_cmds);
nvmet_tcp_fatal_error(queue);
return -EPROTO;
goto err_proto;
}
cmd = &queue->cmds[data->ttag];
} else {
@@ -995,19 +997,32 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
data->ttag, le32_to_cpu(data->data_offset),
cmd->rbytes_done);
/* FIXME: use path and transport errors */
nvmet_req_complete(&cmd->req,
NVME_SC_INVALID_FIELD | NVME_SC_DNR);
return -EPROTO;
goto err_proto;
}
exp_data_len = le32_to_cpu(data->hdr.plen) -
nvmet_tcp_hdgst_len(queue) -
nvmet_tcp_ddgst_len(queue) -
sizeof(*data);
cmd->pdu_len = le32_to_cpu(data->data_length);
if (unlikely(cmd->pdu_len != exp_data_len ||
cmd->pdu_len == 0 ||
cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
goto err_proto;
}
cmd->pdu_recv = 0;
nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
return 0;
err_proto:
/* FIXME: use proper transport errors */
nvmet_tcp_fatal_error(queue);
return -EPROTO;
}
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
@@ -1768,7 +1783,7 @@ static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
(int)sizeof(struct nvme_tcp_icreq_pdu));
if (hdr->type == nvme_tcp_icreq &&
hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
hdr->plen == (__le32)sizeof(struct nvme_tcp_icreq_pdu)) {
hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
pr_debug("queue %d: icreq detected\n",
queue->idx);
return len;
@@ -2053,7 +2068,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
goto err_sock;
}
ret = kernel_listen(port->sock, 128);
ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
if (ret) {
pr_err("failed to listen %d on port sock\n", ret);
goto err_sock;
@@ -2119,8 +2134,19 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
container_of(sq, struct nvmet_tcp_queue, nvme_sq);
if (sq->qid == 0) {
/* Let inflight controller teardown complete */
flush_workqueue(nvmet_wq);
struct nvmet_tcp_queue *q;
int pending = 0;
/* Check for pending controller teardown */
mutex_lock(&nvmet_tcp_queue_mutex);
list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
if (q->nvme_sq.ctrl == sq->ctrl &&
q->state == NVMET_TCP_Q_DISCONNECTING)
pending++;
}
mutex_unlock(&nvmet_tcp_queue_mutex);
if (pending > NVMET_TCP_BACKLOG)
return NVME_SC_CONNECT_CTRL_BUSY;
}
queue->nr_cmds = sq->size * 2;

drivers/nvme/target/trace.c

@@ -211,7 +211,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
return ret;
}
const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -224,8 +224,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
* If we can know the extra data of the connect command in this stage,
* we can update this print statement later.
*/
if (ctrl)
trace_seq_printf(p, "%d", ctrl->cntlid);
if (ctrl_id)
trace_seq_printf(p, "%d", ctrl_id);
else
trace_seq_printf(p, "_");
trace_seq_putc(p, 0);

drivers/nvme/target/trace.h

@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) : \
nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
#define __print_ctrl_name(ctrl) \
nvmet_trace_ctrl_name(p, ctrl)
const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id);
#define __print_ctrl_id(ctrl_id) \
nvmet_trace_ctrl_id(p, ctrl_id)
const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
#define __print_disk_name(name) \
nvmet_trace_disk_name(p, name)
#ifndef TRACE_HEADER_MULTI_READ
static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req)
{
return req->sq->ctrl;
/*
* The queue and controller pointers are not valid until an association
* has been established.
*/
if (!req->sq || !req->sq->ctrl)
return 0;
return req->sq->ctrl->cntlid;
}
static inline void __assign_req_name(char *name, struct nvmet_req *req)
@@ -53,8 +59,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req)
return;
}
strncpy(name, req->ns->device_path,
min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN);
}
#endif
@@ -63,7 +68,7 @@ TRACE_EVENT(nvmet_req_init,
TP_ARGS(req, cmd),
TP_STRUCT__entry(
__field(struct nvme_command *, cmd)
__field(struct nvmet_ctrl *, ctrl)
__field(u16, ctrl_id)
__array(char, disk, DISK_NAME_LEN)
__field(int, qid)
__field(u16, cid)
@@ -76,7 +81,7 @@ TRACE_EVENT(nvmet_req_init,
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->ctrl = nvmet_req_to_ctrl(req);
__entry->ctrl_id = nvmet_req_to_ctrl_id(req);
__assign_req_name(__entry->disk, req);
__entry->qid = req->sq->qid;
__entry->cid = cmd->common.command_id;
@@ -85,12 +90,12 @@ TRACE_EVENT(nvmet_req_init,
__entry->flags = cmd->common.flags;
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = le64_to_cpu(cmd->common.metadata);
memcpy(__entry->cdw10, &cmd->common.cdw10,
memcpy(__entry->cdw10, &cmd->common.cdws,
sizeof(__entry->cdw10));
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
"meta=%#llx, cmd=(%s, %s)",
__print_ctrl_name(__entry->ctrl),
__print_ctrl_id(__entry->ctrl_id),
__print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->nsid,
__entry->flags, __entry->metadata,
@@ -104,7 +109,7 @@ TRACE_EVENT(nvmet_req_complete,
TP_PROTO(struct nvmet_req *req),
TP_ARGS(req),
TP_STRUCT__entry(
__field(struct nvmet_ctrl *, ctrl)
__field(u16, ctrl_id)
__array(char, disk, DISK_NAME_LEN)
__field(int, qid)
__field(int, cid)
@@ -112,7 +117,7 @@ TRACE_EVENT(nvmet_req_complete,
__field(u16, status)
),
TP_fast_assign(
__entry->ctrl = nvmet_req_to_ctrl(req);
__entry->ctrl_id = nvmet_req_to_ctrl_id(req);
__entry->qid = req->cq->qid;
__entry->cid = req->cqe->command_id;
__entry->result = le64_to_cpu(req->cqe->result.u64);
@@ -120,7 +125,7 @@ TRACE_EVENT(nvmet_req_complete,
__assign_req_name(__entry->disk, req);
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
__print_ctrl_name(__entry->ctrl),
__print_ctrl_id(__entry->ctrl_id),
__print_disk_name(__entry->disk),
__entry->qid, __entry->cid, __entry->result, __entry->status)

include/linux/nvme.h

@@ -20,7 +20,6 @@
#define NVMF_TRSVCID_SIZE 32
#define NVMF_TRADDR_SIZE 256
#define NVMF_TSAS_SIZE 256
#define NVMF_AUTH_HASH_LEN 64
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"