mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-05 00:20:32 +00:00
nvmet-rdma: fix null dereference under heavy load
commit 5cbab6303b
upstream.
Under heavy load if we don't have any pre-allocated rsps left, we
dynamically allocate a rsp, but we are not actually allocating memory
for nvme_completion (rsp->req.rsp). In such a case, accessing pointer
fields (req->rsp->status) in nvmet_req_init() will result in crash.
To fix this, allocate the memory for nvme_completion by calling
nvmet_rdma_alloc_rsp().
Fixes: 8407879c ("nvmet-rdma: fix possible bogus dereference under heavy load")
Cc: <stable@vger.kernel.org>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
8d1ee2d54d
commit
f63ee3bb14
1 changed file with 14 additions and 1 deletion
|
@ -137,6 +137,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
|
||||||
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
|
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
|
||||||
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
|
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
|
||||||
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
|
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
|
||||||
|
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
|
||||||
|
struct nvmet_rdma_rsp *r);
|
||||||
|
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
|
||||||
|
struct nvmet_rdma_rsp *r);
|
||||||
|
|
||||||
static struct nvmet_fabrics_ops nvmet_rdma_ops;
|
static struct nvmet_fabrics_ops nvmet_rdma_ops;
|
||||||
|
|
||||||
|
@ -175,9 +179,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
|
||||||
spin_unlock_irqrestore(&queue->rsps_lock, flags);
|
spin_unlock_irqrestore(&queue->rsps_lock, flags);
|
||||||
|
|
||||||
if (unlikely(!rsp)) {
|
if (unlikely(!rsp)) {
|
||||||
rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
|
int ret;
|
||||||
|
|
||||||
|
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
|
||||||
if (unlikely(!rsp))
|
if (unlikely(!rsp))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
|
||||||
|
if (unlikely(ret)) {
|
||||||
|
kfree(rsp);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
rsp->allocated = true;
|
rsp->allocated = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -190,6 +202,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
if (unlikely(rsp->allocated)) {
|
if (unlikely(rsp->allocated)) {
|
||||||
|
nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
|
||||||
kfree(rsp);
|
kfree(rsp);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue