mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-12 21:57:43 +00:00
skd: Inline skd_process_request()
This patch does not change any functionality but makes the skd driver code more similar to that of other blk-mq kernel drivers. Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Hannes Reinecke <hare@suse.de> Cc: Johannes Thumshirn <jthumshirn@suse.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
49f16e2f20
commit
c39c6c773d
1 changed file with 10 additions and 20 deletions
|
@ -478,8 +478,10 @@ static bool skd_fail_all(struct request_queue *q)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void skd_process_request(struct request *req, bool last)
|
static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||||
|
const struct blk_mq_queue_data *mqd)
|
||||||
{
|
{
|
||||||
|
struct request *const req = mqd->rq;
|
||||||
struct request_queue *const q = req->q;
|
struct request_queue *const q = req->q;
|
||||||
struct skd_device *skdev = q->queuedata;
|
struct skd_device *skdev = q->queuedata;
|
||||||
struct skd_fitmsg_context *skmsg;
|
struct skd_fitmsg_context *skmsg;
|
||||||
|
@ -492,6 +494,11 @@ static void skd_process_request(struct request *req, bool last)
|
||||||
const u32 count = blk_rq_sectors(req);
|
const u32 count = blk_rq_sectors(req);
|
||||||
const int data_dir = rq_data_dir(req);
|
const int data_dir = rq_data_dir(req);
|
||||||
|
|
||||||
|
if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
|
||||||
|
return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
|
||||||
|
|
||||||
|
blk_mq_start_request(req);
|
||||||
|
|
||||||
WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
|
WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
|
||||||
tag, skd_max_queue_depth, q->nr_requests);
|
tag, skd_max_queue_depth, q->nr_requests);
|
||||||
|
|
||||||
|
@ -514,7 +521,7 @@ static void skd_process_request(struct request *req, bool last)
|
||||||
dev_dbg(&skdev->pdev->dev, "error Out\n");
|
dev_dbg(&skdev->pdev->dev, "error Out\n");
|
||||||
skd_end_request(skdev, blk_mq_rq_from_pdu(skreq),
|
skd_end_request(skdev, blk_mq_rq_from_pdu(skreq),
|
||||||
BLK_STS_RESOURCE);
|
BLK_STS_RESOURCE);
|
||||||
return;
|
return BLK_STS_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
|
dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
|
||||||
|
@ -578,30 +585,13 @@ static void skd_process_request(struct request *req, bool last)
|
||||||
if (skd_max_req_per_msg == 1) {
|
if (skd_max_req_per_msg == 1) {
|
||||||
skd_send_fitmsg(skdev, skmsg);
|
skd_send_fitmsg(skdev, skmsg);
|
||||||
} else {
|
} else {
|
||||||
if (last ||
|
if (mqd->last ||
|
||||||
fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
|
fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
|
||||||
skd_send_fitmsg(skdev, skmsg);
|
skd_send_fitmsg(skdev, skmsg);
|
||||||
skdev->skmsg = NULL;
|
skdev->skmsg = NULL;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&skdev->lock, flags);
|
spin_unlock_irqrestore(&skdev->lock, flags);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|
||||||
const struct blk_mq_queue_data *mqd)
|
|
||||||
{
|
|
||||||
struct request *req = mqd->rq;
|
|
||||||
struct request_queue *q = req->q;
|
|
||||||
struct skd_device *skdev = q->queuedata;
|
|
||||||
|
|
||||||
if (skdev->state == SKD_DRVR_STATE_ONLINE) {
|
|
||||||
blk_mq_start_request(req);
|
|
||||||
skd_process_request(req, mqd->last);
|
|
||||||
|
|
||||||
return BLK_STS_OK;
|
|
||||||
} else {
|
|
||||||
return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
|
|
||||||
}
|
|
||||||
|
|
||||||
return BLK_STS_OK;
|
return BLK_STS_OK;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue