blk-mq: add a new blk_mq_complete_request_remote API

This is a variant of blk_mq_complete_request that only completes
the request if it needs to be bounced to another CPU or a softirq.  If
the request can be completed locally, the function returns false and
lets the driver complete it without requiring an indirect function
call.

Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 40d09b53bf (parent 963395269c)
Christoph Hellwig, 2020-06-11 08:44:50 +02:00; committed by Jens Axboe
2 changed files with 32 additions and 24 deletions
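
As the message above notes, a driver can now skip the indirect ->complete
call when the request does not need to be bounced. A minimal sketch of that
driver-side pattern, assuming a hypothetical driver; the my_drv_* names are
illustrative and not part of this commit:

	/* Hypothetical driver completion path using the new API. */
	static void my_drv_complete_rq(struct request *rq)
	{
		/*
		 * true: the completion was bounced to another CPU via IPI
		 * or deferred to a softirq; nothing more to do here.
		 */
		if (blk_mq_complete_request_remote(rq))
			return;

		/*
		 * false: the request can be completed locally, so finish it
		 * directly instead of paying for the indirect ->complete()
		 * call that blk_mq_complete_request() would make.
		 */
		my_drv_end_request(rq);	/* hypothetical helper */
	}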

diff --git a/block/blk-mq.c b/block/blk-mq.c

@@ -632,8 +632,11 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
 	return 0;
 }
 
-static void __blk_mq_complete_request(struct request *rq)
+static void __blk_mq_complete_request_remote(void *data)
 {
+	struct request *rq = data;
+
 	/*
 	 * For most of single queue controllers, there is only one irq vector
 	 * for handling I/O completion, and the only irq's affinity is set
@@ -649,11 +652,6 @@ static void __blk_mq_complete_request(struct request *rq)
 		rq->q->mq_ops->complete(rq);
 }
 
-static void __blk_mq_complete_request_remote(void *data)
-{
-	__blk_mq_complete_request(data);
-}
-
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
 {
 	int cpu = raw_smp_processor_id();
@@ -672,6 +670,32 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	return cpu_online(rq->mq_ctx->cpu);
 }
 
+bool blk_mq_complete_request_remote(struct request *rq)
+{
+	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+
+	/*
+	 * For a polled request, always complete locally, it's pointless
+	 * to redirect the completion.
+	 */
+	if (rq->cmd_flags & REQ_HIPRI)
+		return false;
+
+	if (blk_mq_complete_need_ipi(rq)) {
+		rq->csd.func = __blk_mq_complete_request_remote;
+		rq->csd.info = rq;
+		rq->csd.flags = 0;
+		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
+	} else {
+		if (rq->q->nr_hw_queues > 1)
+			return false;
+		blk_mq_trigger_softirq(rq);
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
+
 /**
  * blk_mq_complete_request - end I/O on a request
  * @rq: the request being processed
@@ -681,25 +705,8 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
  **/
 void blk_mq_complete_request(struct request *rq)
 {
-	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
-
-	/*
-	 * For a polled request, always complete locally, it's pointless
-	 * to redirect the completion.
-	 */
-	if (rq->cmd_flags & REQ_HIPRI) {
-		rq->q->mq_ops->complete(rq);
-		return;
-	}
-
-	if (blk_mq_complete_need_ipi(rq)) {
-		rq->csd.func = __blk_mq_complete_request_remote;
-		rq->csd.info = rq;
-		rq->csd.flags = 0;
-		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
-	} else {
-		__blk_mq_complete_request(rq);
-	}
+	if (!blk_mq_complete_request_remote(rq))
+		rq->q->mq_ops->complete(rq);
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
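
For context on the IPI branch above: smp_call_function_single_async() invokes
csd.func(csd.info) on the target CPU, which is why the request can embed its
own csd and pass itself as the argument. A standalone sketch of that generic
pattern under the kernel of this era (v5.8-ish, where the csd still had a
flags field); the my_* names are hypothetical, not from this commit:

	#include <linux/smp.h>
	#include <linux/printk.h>

	/* Runs on the target CPU, with csd.info as its argument. */
	static void my_remote_fn(void *info)
	{
		pr_info("running on CPU %d for %p\n",
			raw_smp_processor_id(), info);
	}

	static call_single_data_t my_csd;

	/* Fire-and-forget cross-CPU call; my_csd must stay live until it runs. */
	static void my_kick_cpu(int cpu, void *arg)
	{
		my_csd.func = my_remote_fn;
		my_csd.info = arg;
		my_csd.flags = 0;
		smp_call_function_single_async(cpu, &my_csd);
	}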

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h

@@ -504,6 +504,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request_remote(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 		struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);