block: remove the gendisk argument to blk_execute_rq

Remove the gendisk argument to blk_execute_rq and blk_execute_rq_nowait
given that it is unused now.  Also convert the boolean at_head parameter
to actually use the bool type while touching the prototype.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20211126121802.2090656-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Christoph Hellwig <hch@lst.de>, 2021-11-26 13:18:01 +01:00
Committed by: Jens Axboe <axboe@kernel.dk>
parent f3fa33acca
commit b84ba30b6c
21 changed files with 35 additions and 42 deletions
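
For context, a minimal, hypothetical caller after this change (the function name, queue variable, and error handling below are illustrative and not part of the patch): a passthrough request is allocated as before, but the executing helper now takes only the request and a bool at_head flag.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/*
 * Illustrative sketch only, not part of this patch: run a passthrough
 * request synchronously with the new two-argument prototype.
 */
static blk_status_t example_sync_passthrough(struct request_queue *q)
{
        struct request *rq;
        blk_status_t status;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return BLK_STS_RESOURCE;

        /* was: blk_execute_rq(NULL, rq, 1); */
        status = blk_execute_rq(rq, true);

        blk_mq_free_request(rq);
        return status;
}

The asynchronous variant changes the same way: blk_execute_rq_nowait(rq, false, done) now takes just the request, the bool at_head flag, and the rq_end_io_fn completion, with no gendisk.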


@ -1153,7 +1153,6 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
/**
* blk_execute_rq_nowait - insert a request to I/O scheduler for execution
* @bd_disk: matching gendisk
* @rq: request to insert
* @at_head: insert request at head or tail of queue
* @done: I/O completion handler
@ -1165,8 +1164,7 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
* Note:
* This function will invoke @done directly if the queue is dead.
*/
void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
int at_head, rq_end_io_fn *done)
void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
{
WARN_ON(irqs_disabled());
WARN_ON(!blk_rq_is_passthrough(rq));
@ -1204,7 +1202,6 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
/**
* blk_execute_rq - insert a request into queue for execution
* @bd_disk: matching gendisk
* @rq: request to insert
* @at_head: insert request at head or tail of queue
*
@ -1213,14 +1210,13 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
* for execution and wait for completion.
* Return: The blk_status_t result provided to blk_mq_end_request().
*/
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
int at_head)
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long hang_check;
rq->end_io_data = &wait;
blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);
blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;


@ -92,7 +92,7 @@ static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
goto out_unmap_bidi_rq;
bio = rq->bio;
blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
/*
* The assignments below don't make much sense, but are kept for


@ -1015,7 +1015,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rq->timeout = timeout;
/* insert request and run queue */
blk_execute_rq(NULL, rq, true);
blk_execute_rq(rq, true);
if (int_cmd->status) {
dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",


@ -781,7 +781,7 @@ static int pd_special_command(struct pd_unit *disk,
req = blk_mq_rq_to_pdu(rq);
req->func = func;
blk_execute_rq(disk->gd, rq, 0);
blk_execute_rq(rq, false);
blk_mq_free_request(rq);
return 0;
}


@ -722,7 +722,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (cgc->quiet)
rq->rq_flags |= RQF_QUIET;
blk_execute_rq(pd->bdev->bd_disk, rq, 0);
blk_execute_rq(rq, false);
if (scsi_req(rq)->result)
ret = -EIO;
out:


@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
blk_execute_rq_nowait(NULL, rq, true, NULL);
blk_execute_rq_nowait(rq, true, NULL);
return 0;
@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
blk_execute_rq_nowait(NULL, rq, true, NULL);
blk_execute_rq_nowait(rq, true, NULL);
return 0;
}


@ -384,7 +384,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
if (err)
goto out;
blk_execute_rq(vblk->disk, req, false);
blk_execute_rq(req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
blk_mq_free_request(req);


@ -264,7 +264,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
blk_execute_rq(NULL, req, 0);
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
@ -657,7 +657,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(NULL, req, 0);
blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
blk_mq_free_request(req);
@ -726,7 +726,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
blk_execute_rq(NULL, req, 0);
blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
/* copy to user if data and response */
@ -2743,7 +2743,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
blk_execute_rq(NULL, req, 0);
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
*val = ret;
@ -2782,7 +2782,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(NULL, req, 0);
blk_execute_rq(req, false);
err = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
if (err) {


@ -1056,7 +1056,7 @@ static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
{
blk_status_t status;
status = blk_execute_rq(disk, rq, at_head);
status = blk_execute_rq(rq, at_head);
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
return -EINTR;
if (nvme_req(rq)->status)
@ -1283,7 +1283,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)


@ -1371,7 +1371,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
abort_req->end_io_data = NULL;
blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio);
blk_execute_rq_nowait(abort_req, false, abort_endio);
/*
* The aborted req will be completed on receiving the abort req.
@ -2416,9 +2416,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
blk_execute_rq_nowait(NULL, req, false,
opcode == nvme_admin_delete_cq ?
nvme_del_cq_end : nvme_del_queue_end);
blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
nvme_del_cq_end : nvme_del_queue_end);
return 0;
}


@ -284,8 +284,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
schedule_work(&req->p.work);
} else {
rq->end_io_data = req;
blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,
nvmet_passthru_req_done);
blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
}
if (ns)


@ -60,7 +60,7 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
goto out_free_cmd;
bio = rq->bio;
blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
/*
* fill in all the output members


@ -2040,7 +2040,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->timeout = 10 * HZ;
rq->retries = 5;
blk_execute_rq_nowait(NULL, req, 1, eh_lock_door_done);
blk_execute_rq_nowait(req, true, eh_lock_door_done);
}
/**


@ -483,7 +483,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
start_time = jiffies;
blk_execute_rq(disk, rq, at_head);
blk_execute_rq(rq, at_head);
hdr->duration = jiffies_to_msecs(jiffies - start_time);
@ -620,7 +620,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
goto error;
}
blk_execute_rq(disk, rq, 0);
blk_execute_rq(rq, false);
err = req->result & 0xff; /* only 8 bit SCSI status */
if (err) {


@ -241,7 +241,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
/*
* head injection *required* here otherwise quiesce won't work
*/
blk_execute_rq(NULL, req, 1);
blk_execute_rq(req, true);
/*
* Some devices (USB mass-storage in particular) may transfer


@ -833,7 +833,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(NULL, srp->rq, at_head, sg_rq_end_io);
blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
return 0;
}


@ -994,7 +994,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
rq->timeout = 60 * HZ;
bio = rq->bio;
blk_execute_rq(disk, rq, 0);
blk_execute_rq(rq, false);
if (scsi_req(rq)->result) {
struct scsi_sense_hdr sshdr;


@ -581,7 +581,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
rq->retries = retries;
req->end_io_data = SRpnt;
blk_execute_rq_nowait(NULL, req, 1, st_scsi_execute_end);
blk_execute_rq_nowait(req, true, st_scsi_execute_end);
return 0;
}


@ -677,7 +677,7 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
ufshpb_set_unmap_cmd(rq->cmd, rgn);
rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
hpb->stats.umap_req_cnt++;
}
@ -719,7 +719,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
map_req->rb.srgn_idx, mem_size);
rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
hpb->stats.map_req_cnt++;
return 0;


@ -1005,7 +1005,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->timeout = PS_TIMEOUT_OTHER;
scsi_req(req)->retries = PS_RETRY;
blk_execute_rq_nowait(NULL, req, (cmd->sam_task_attr == TCM_HEAD_TAG),
blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
pscsi_req_done);
return 0;


@ -924,10 +924,9 @@ int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct gendisk *, struct request *, int,
rq_end_io_fn *);
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
int at_head);
void blk_execute_rq_nowait(struct request *rq, bool at_head,
rq_end_io_fn *end_io);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
struct req_iterator {
struct bvec_iter iter;