diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 34e17ee799be..68846897d213 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
 }
 
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+static void virtblk_initialize_rq(struct request *req)
+{
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+	scsi_req_init(&vbr->sreq);
+}
+#endif
+
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq = virtio_queue_rq,
 	.complete = virtblk_request_done,
 	.init_request = virtblk_init_request,
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+	.initialize_rq_fn = virtblk_initialize_rq,
+#endif
 	.map_queues = virtblk_map_queues,
 };
 
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 81e18f9628d0..a7355ab3bb22 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1328,6 +1328,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 	struct scsi_request *req = scsi_req(rq);
 
+	scsi_req_init(req);
 	memset(req->cmd, 0, BLK_MAX_CDB);
 
 	if (rq_data_dir(rq) == READ)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5a14cc7f28ee..37f9039bb9ca 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1249,6 +1249,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 		goto out;
 	}
 
+	__nvme_revalidate_disk(disk, id);
 	nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
 	if (!uuid_equal(&ns->uuid, &uuid) ||
 	    memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 87bac27ec64b..0ebb539f3bd3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1614,12 +1614,15 @@ nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 		/*
 		 * reconnecting state means transport disruption, which
 		 * can take a long time and even might fail permanently,
-		 * so we can't let incoming I/O be requeued forever.
-		 * fail it fast to allow upper layers a chance to
-		 * failover.
+		 * fail fast to give upper layers a chance to failover.
+		 * deleting state means that the ctrl will never accept
+		 * commands again, fail it permanently.
 		 */
-		if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+		if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
+		    queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
+			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 			return BLK_STS_IOERR;
+		}
 		return BLK_STS_RESOURCE; /* try again later */
 	}
 }