From 39bbee4e549fbc358b2ef9137c4bf459abd164fb Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Tue, 16 Aug 2016 09:24:39 +0100
Subject: [PATCH 01/11] nvme-rdma: initialize ret to zero to avoid returning garbage

ret is not initialized, so it contains garbage.  Ensure garbage is not
returned by initializing ret to 0.

Signed-off-by: Colin Ian King
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/host/rdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 8d2875b4c56d..9c69393f6d1f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1319,7 +1319,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	int ret;
+	int ret = 0;
 
 	/* Own the controller deletion */
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))

From 3256aaef5e9a851f6be47656868020726e102187 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Vincent=20Stehl=C3=A9?=
Date: Tue, 16 Aug 2016 15:11:25 +0200
Subject: [PATCH 02/11] nvmet-rdma: Fix use after free
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Avoid dereferencing the queue pointer in nvmet_rdma_release_queue_work()
after it has been freed by nvmet_rdma_free_queue().

Fixes: d8f7750a08968b10 ("nvmet-rdma: Correctly handle RDMA device hot removal")
Signed-off-by: Vincent Stehlé
Cc: Sagi Grimberg
Cc: Christoph Hellwig
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/target/rdma.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index b4d648536c3e..5de8d0a0db58 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -978,10 +978,11 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
 		container_of(w, struct nvmet_rdma_queue, release_work);
 	struct rdma_cm_id *cm_id = queue->cm_id;
 	struct nvmet_rdma_device *dev = queue->dev;
+	enum nvmet_rdma_queue_state state = queue->state;
 
 	nvmet_rdma_free_queue(queue);
 
-	if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
 		rdma_destroy_id(cm_id);
 
 	kref_put(&dev->ref, nvmet_rdma_free_dev);

From b825b44c4ef4dabfdaf4e82db2263d377ac45d67 Mon Sep 17 00:00:00 2001
From: Jay Freyensee
Date: Wed, 17 Aug 2016 15:00:25 -0700
Subject: [PATCH 03/11] nvmet-rdma: +1 to *queue_size from hsqsize/hrqsize

The host will be sending hsqsize as a 0-based value (like sqsize), so
the target needs to be adjusted as well.
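
For illustration only (the local variables below are invented for this
example; only hsqsize, recv_queue_size and the le16 helpers appear in the
driver): a host that wants a queue depth of 32 puts 31 on the wire, and
the target adds the one back:

    /* sketch: 0-based wire encoding vs. the 1-based depth used internally */
    u16 depth = 32;                                  /* desired queue depth */
    __le16 hsqsize = cpu_to_le16(depth - 1);         /* host sends a 0-based value */
    u16 recv_queue_size = le16_to_cpu(hsqsize) + 1;  /* target restores the real depth */
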
Signed-off-by: Jay Freyensee
Reviewed-by: Sagi Grimberg
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/target/rdma.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5de8d0a0db58..1cbe6e053b5b 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1004,10 +1004,10 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
 	queue->host_qid = le16_to_cpu(req->qid);
 
 	/*
-	 * req->hsqsize corresponds to our recv queue size
+	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
-	queue->recv_queue_size = le16_to_cpu(req->hsqsize);
+	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
 	queue->send_queue_size = le16_to_cpu(req->hrqsize);
 
 	if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)

From f994d9dc28bc27353acde2caaf718222d92a3e24 Mon Sep 17 00:00:00 2001
From: Jay Freyensee
Date: Wed, 17 Aug 2016 15:00:26 -0700
Subject: [PATCH 04/11] fabrics: define admin sqsize min default, per spec

Upon admin queue connect(), the rdma qp was being set based on
NVMF_AQ_DEPTH.  However, the fabrics layer was using the sqsize field
value set for I/O queues for the admin queue as well, which threw the
nvme layer and the rdma layer out of sync:

[root@fedora23-fabrics-host1 nvmf]# dmesg
[ 3507.798642] nvme_fabrics: nvmf_connect_admin_queue():admin sqsize being sent is: 128
[ 3507.798858] nvme nvme0: creating 16 I/O queues.
[ 3507.896407] nvme nvme0: new ctrl: NQN "nullside-nqn", addr 192.168.1.3:4420

Thus, to give the admin queue its own depth, use NVMF_AQ_DEPTH for both
the connect() command and the RDMA private data, as the minimum depth
specified in the NVMe-over-Fabrics 1.0 spec (and in that RDMA private
data, treat hrqsize as a 1's-based value, per the current understanding
of the fabrics spec).

Reported-by: Daniel Verkamp
Signed-off-by: Jay Freyensee
Reviewed-by: Daniel Verkamp
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/host/fabrics.c |  9 ++++++++-
 drivers/nvme/host/rdma.c    | 13 +++++++++++--
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index dc996761042f..020302c6ea57 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -363,7 +363,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	cmd.connect.opcode = nvme_fabrics_command;
 	cmd.connect.fctype = nvme_fabrics_type_connect;
 	cmd.connect.qid = 0;
-	cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+	/*
+	 * fabrics spec sets a minimum of depth 32 for admin queue,
+	 * so set the queue with this depth always until
+	 * justification otherwise.
+	 */
+	cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
 	/*
	 * Set keep-alive timeout in seconds granularity (ms * 1000)
	 * and add a grace period for controller kato enforcement
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9c69393f6d1f..d44809e6b03f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1278,8 +1278,17 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 
 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
 	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
-	priv.hrqsize = cpu_to_le16(queue->queue_size);
-	priv.hsqsize = cpu_to_le16(queue->queue_size);
+	/*
+	 * set the admin queue depth to the minimum size
+	 * specified by the Fabrics standard.
+	 */
+	if (priv.qid == 0) {
+		priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+		priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+	} else {
+		priv.hrqsize = cpu_to_le16(queue->queue_size);
+		priv.hsqsize = cpu_to_le16(queue->queue_size);
+	}
 
 	ret = rdma_connect(queue->cm_id, &param);
 	if (ret) {

From c5af8654c422cfdd8480be3a244748e18cace6c5 Mon Sep 17 00:00:00 2001
From: Jay Freyensee
Date: Wed, 17 Aug 2016 15:00:27 -0700
Subject: [PATCH 05/11] nvme-rdma: fix sqsize/hsqsize per spec

Per the NVMe-over-Fabrics 1.0 spec, sqsize is represented as a 0-based
value.  Also per spec, the RDMA binding values shall be set to sqsize,
which makes hsqsize a 0-based value as well.

Thus, the sqsize sent during NVMf connect() is now:

[root@fedora23-fabrics-host1 for-48]# dmesg
[ 318.720645] nvme_fabrics: nvmf_connect_admin_queue(): sqsize for admin queue: 31
[ 318.720884] nvme nvme0: creating 16 I/O queues.
[ 318.810114] nvme_fabrics: nvmf_connect_io_queue(): sqsize for i/o queue: 127

Finally, the current interpretation is that hrqsize is a 1's-based
value, so set it accordingly.

Reported-by: Daniel Verkamp
Signed-off-by: Jay Freyensee
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/host/rdma.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d44809e6b03f..c133256fd745 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -645,7 +645,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
-		ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+		ret = nvme_rdma_init_queue(ctrl, i,
+				ctrl->ctrl.opts->queue_size);
 		if (ret) {
 			dev_info(ctrl->ctrl.device,
				"failed to initialize i/o queue: %d\n", ret);
@@ -1286,8 +1287,13 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 		priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
 		priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
 	} else {
+		/*
+		 * current interpretation of the fabrics spec
+		 * is at minimum you make hrqsize sqsize+1, or a
+		 * 1's based representation of sqsize.
+ */ priv.hrqsize = cpu_to_le16(queue->queue_size); - priv.hsqsize = cpu_to_le16(queue->queue_size); + priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); } ret = rdma_connect(queue->cm_id, ¶m); @@ -1825,7 +1831,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ctrl->tag_set.ops = &nvme_rdma_mq_ops; - ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize; + ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; ctrl->tag_set.reserved_tags = 1; /* fabric connect */ ctrl->tag_set.numa_node = NUMA_NO_NODE; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; @@ -1923,7 +1929,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, spin_lock_init(&ctrl->lock); ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */ - ctrl->ctrl.sqsize = opts->queue_size; + ctrl->ctrl.sqsize = opts->queue_size - 1; ctrl->ctrl.kato = opts->kato; ret = -ENOMEM; From eadb7cf44105ae8250f0d638dc880c3ed511c4e2 Mon Sep 17 00:00:00 2001 From: Jay Freyensee Date: Wed, 17 Aug 2016 15:00:28 -0700 Subject: [PATCH 06/11] nvme-loop: set sqsize to 0-based value, per spec Signed-off-by: Jay Freyensee Signed-off-by: Sagi Grimberg --- drivers/nvme/target/loop.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 7affd40a6b33..395e60dad835 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -556,7 +556,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ctrl->tag_set.ops = &nvme_loop_mq_ops; - ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize; + ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; ctrl->tag_set.reserved_tags = 1; /* fabric connect */ ctrl->tag_set.numa_node = NUMA_NO_NODE; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; @@ -620,7 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, ret = -ENOMEM; - ctrl->ctrl.sqsize = opts->queue_size; + ctrl->ctrl.sqsize = opts->queue_size - 1; ctrl->ctrl.kato = opts->kato; ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), From 7a665d2f60b457c0d77b3e4f01e21c55ffc57069 Mon Sep 17 00:00:00 2001 From: Daniel Verkamp Date: Tue, 28 Jun 2016 11:20:23 -0700 Subject: [PATCH 07/11] nvme-fabrics: change NQN UUID to big-endian format NVM Express 1.2.1 section 7.9, NVMe Qualified Names, specifies that the UUID format of NQN uses a UUID based on RFC 4122. RFC 4122 specifies that the UUID is encoded in big-endian byte order. Switch the NVMe over Fabrics host ID field from little-endian UUID to big-endian UUID to match the specification. 
Signed-off-by: Daniel Verkamp
Reviewed-by: Jay Freyensee
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/host/fabrics.c | 10 +++++-----
 drivers/nvme/host/fabrics.h |  2 +-
 include/linux/nvme.h        |  2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 020302c6ea57..be0b1067c9fa 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -56,7 +56,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
 	kref_init(&host->ref);
 	memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-	uuid_le_gen(&host->id);
+	uuid_be_gen(&host->id);
 
 	list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -73,9 +73,9 @@ static struct nvmf_host *nvmf_host_default(void)
 		return NULL;
 
 	kref_init(&host->ref);
-	uuid_le_gen(&host->id);
+	uuid_be_gen(&host->id);
 	snprintf(host->nqn, NVMF_NQN_SIZE,
-		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id);
+		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
 	mutex_lock(&nvmf_hosts_mutex);
 	list_add_tail(&host->list, &nvmf_hosts);
@@ -382,7 +382,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	if (!data)
 		return -ENOMEM;
 
-	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
 	data->cntlid = cpu_to_le16(0xffff);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -441,7 +441,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	if (!data)
 		return -ENOMEM;
 
-	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+	memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
 	data->cntlid = cpu_to_le16(ctrl->cntlid);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 89df52c8be97..46e460aee52d 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -34,7 +34,7 @@ struct nvmf_host {
 	struct kref ref;
 	struct list_head list;
 	char nqn[NVMF_NQN_SIZE];
-	uuid_le id;
+	uuid_be id;
 };
 
 /**
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index d8b37bab2887..7676557ce357 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -794,7 +794,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-	uuid_le hostid;
+	uuid_be hostid;
 	__le16 cntlid;
 	char resv4[238];
 	char subsysnqn[NVMF_NQN_FIELD_LEN];

From 98096d8a787f05b1afe3869aa01e84981915c81d Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 18 Aug 2016 11:16:35 -0700
Subject: [PATCH 08/11] nvme-fabrics: get a reference when reusing a nvme_host structure

Without this we'll get a use-after-free after connecting two controllers
using the same hostnqn and then disconnecting one of them.
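
To make the failure mode concrete, a sketch of the refcount sequence
(helper names as in drivers/nvme/host/fabrics.c; the calls below are
conceptual, both helpers are static and reached via the connect path):

    /* sketch: two controllers sharing one nvmf_host before this fix */
    struct nvmf_host *a = nvmf_host_add("nqn.example-host");  /* allocates, ref = 1 */
    struct nvmf_host *b = nvmf_host_add("nqn.example-host");  /* same object, ref still 1 */
    nvmf_host_put(a);                    /* ref drops to 0, host is freed */
    /* b now dangles; with the kref_get() added here the sequence is ref 2 -> 1 instead */
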
Signed-off-by: Christoph Hellwig
Reviewed-by: Jay Freyensee
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/host/fabrics.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index be0b1067c9fa..4eff49174466 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
 	mutex_lock(&nvmf_hosts_mutex);
 	host = __nvmf_host_find(hostnqn);
-	if (host)
+	if (host) {
+		kref_get(&host->ref);
 		goto out_unlock;
+	}
 
 	host = kmalloc(sizeof(*host), GFP_KERNEL);
 	if (!host)

From aa71987472a974f4f6dc4be377720564079ef42e Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 18 Aug 2016 11:16:36 -0700
Subject: [PATCH 09/11] nvme: fabrics drivers don't need the nvme-pci driver

So select the NVME_CORE symbol instead of depending on BLK_DEV_NVME.

Signed-off-by: Christoph Hellwig
Reviewed-by: Jay Freyensee
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/host/Kconfig   | 2 +-
 drivers/nvme/target/Kconfig | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index db39d53cdfb9..0c644f7bdf80 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -31,7 +31,7 @@ config NVME_FABRICS
 config NVME_RDMA
 	tristate "NVM Express over Fabrics RDMA host driver"
 	depends on INFINIBAND
-	depends on BLK_DEV_NVME
+	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
 	help
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index a5c31cbeb481..3a5b9d0576cb 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -15,8 +15,8 @@ config NVME_TARGET
 
 config NVME_TARGET_LOOP
 	tristate "NVMe loopback device support"
-	depends on BLK_DEV_NVME
 	depends on NVME_TARGET
+	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
 	help

From f5b7b559e14881b27d76f9c97817ec82bfc48827 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 24 Aug 2016 12:25:56 +0300
Subject: [PATCH 10/11] nvme-rdma: Get rid of duplicate variable

We already have need_inval in ib_mr, let's use that instead.
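
Purely as a before/after illustration of the pattern (the real call
sites are in the diff below):

    /* sketch: the flag now lives on the memory region, not on the request */
    if (req->mr->need_inval)                /* was: req->need_inval */
        nvme_rdma_inv_rkey(queue, req);     /* rkey still pending remote invalidation */
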
Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig
---
 drivers/nvme/host/rdma.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c133256fd745..881ac28575ef 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -77,7 +77,6 @@ struct nvme_rdma_request {
 	u32 num_sge;
 	int nents;
 	bool inline_data;
-	bool need_inval;
 	struct ib_reg_wr reg_wr;
 	struct ib_cqe reg_cqe;
 	struct nvme_rdma_queue *queue;
@@ -286,7 +285,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	int ret = 0;
 
-	if (!req->need_inval)
+	if (!req->mr->need_inval)
 		goto out;
 
 	ib_dereg_mr(req->mr);
@@ -298,7 +297,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 		req->mr = NULL;
 	}
 
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 out:
 	return ret;
@@ -850,7 +849,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	if (!blk_rq_bytes(rq))
 		return;
 
-	if (req->need_inval) {
+	if (req->mr->need_inval) {
 		res = nvme_rdma_inv_rkey(queue, req);
 		if (res < 0) {
 			dev_err(ctrl->ctrl.device,
@@ -936,7 +935,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 			IB_ACCESS_REMOTE_READ |
 			IB_ACCESS_REMOTE_WRITE;
 
-	req->need_inval = true;
+	req->mr->need_inval = true;
 
 	sg->addr = cpu_to_le64(req->mr->iova);
 	put_unaligned_le24(req->mr->length, sg->length);
@@ -959,7 +958,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	req->num_sge = 1;
 	req->inline_data = false;
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1146,7 +1145,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 
 	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
 	    wc->ex.invalidate_rkey == req->mr->rkey)
-		req->need_inval = false;
+		req->mr->need_inval = false;
 
 	blk_mq_complete_request(rq, status);
 
@@ -1476,7 +1475,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
 		flush = true;
 	ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-			req->need_inval ? &req->reg_wr.wr : NULL, flush);
+			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
 	if (ret) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;

From 4d8c6a7946d53648d9ed0e3852a1c81ce07d40db Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Fri, 26 Aug 2016 00:37:52 +0300
Subject: [PATCH 11/11] nvme-rdma: Get rid of redundant defines

Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig
---
 drivers/nvme/host/rdma.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 881ac28575ef..ab545fb347a0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -43,10 +43,6 @@
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS	1
 
-#define NVME_RDMA_MAX_PAGES_PER_MR	512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY	20
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.