block: flush plug based on hardware and software queue order

commit 26fed4ac4e upstream.

We used to sort the plug list if we had multiple queues before dispatching
requests to the IO scheduler. This usually isn't needed, but for certain
workloads that interleave requests to disks, it's less efficient to
process the plug list one-by-one if everything is interleaved.

Don't sort the list, but skip through it and flush out entries that have
the same target at the same time.
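For illustration only (not part of the patch): a minimal userspace sketch of
the same one-pass idea. The struct req/struct plug types, the integer
"target" field standing in for the (hctx, ctx) pair, and the
dispatch_one_target() helper are made-up stand-ins for the kernel's request,
blk_plug and blk_mq_dispatch_plug_list(); only the control flow mirrors the
patch.

#include <stdio.h>
#include <stdlib.h>

struct req {
	int target;		/* stands in for the (hctx, ctx) pair */
	int id;			/* only so the output is readable     */
	struct req *next;
};

struct plug {
	struct req *list;	/* singly linked, LIFO like the plug's mq_list */
};

static void plug_add(struct plug *plug, struct req *rq)
{
	rq->next = plug->list;
	plug->list = rq;
}

static struct req *plug_pop(struct plug *plug)
{
	struct req *rq = plug->list;

	plug->list = rq->next;
	rq->next = NULL;
	return rq;
}

/*
 * One pass: dispatch every request that shares the first entry's target and
 * requeue the rest, with no sorting. Mismatched entries simply go back on
 * the plug list for the next pass.
 */
static void dispatch_one_target(struct plug *plug)
{
	struct req *requeue_list = NULL;
	int this_target = -1;
	unsigned int depth = 0;

	do {
		struct req *rq = plug_pop(plug);

		if (this_target == -1) {
			this_target = rq->target;
		} else if (rq->target != this_target) {
			/* requeue, like rq_list_add() in the patch */
			rq->next = requeue_list;
			requeue_list = rq;
			continue;
		}
		/* "dispatch": here we just print and free the request */
		printf("dispatch req %d to target %d\n", rq->id, rq->target);
		free(rq);
		depth++;
	} while (plug->list);

	printf("flushed %u request(s) for target %d\n", depth, this_target);
	plug->list = requeue_list;
}

int main(void)
{
	struct plug plug = { .list = NULL };

	/* interleaved requests for two targets, as in the workload above */
	for (int i = 0; i < 6; i++) {
		struct req *rq = malloc(sizeof(*rq));

		rq->target = i % 2;
		rq->id = i;
		plug_add(&plug, rq);
	}

	/* same shape as the outer do/while in blk_mq_flush_plug_list() */
	do {
		dispatch_one_target(&plug);
	} while (plug.list);

	return 0;
}

Each call to dispatch_one_target() flushes one target's requests and leaves
the rest on the plug list, so the outer loop runs once per distinct target
rather than once per request.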

Fixes: df87eb0fce ("block: get rid of plug list sorting")
Reported-and-tested-by: Song Liu <song@kernel.org>
Reviewed-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Jens Axboe authored on 2022-03-11 10:24:17 -07:00; committed by Greg Kroah-Hartman
parent b0c1d88b83
commit e19b0f8391
1 changed file with 28 additions and 31 deletions

@@ -2561,13 +2561,36 @@ static void __blk_mq_flush_plug_list(struct request_queue *q,
 	q->mq_ops->queue_rqs(&plug->mq_list);
 }
 
+static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+{
+	struct blk_mq_hw_ctx *this_hctx = NULL;
+	struct blk_mq_ctx *this_ctx = NULL;
+	struct request *requeue_list = NULL;
+	unsigned int depth = 0;
+	LIST_HEAD(list);
+
+	do {
+		struct request *rq = rq_list_pop(&plug->mq_list);
+
+		if (!this_hctx) {
+			this_hctx = rq->mq_hctx;
+			this_ctx = rq->mq_ctx;
+		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+			rq_list_add(&requeue_list, rq);
+			continue;
+		}
+		list_add_tail(&rq->queuelist, &list);
+		depth++;
+	} while (!rq_list_empty(plug->mq_list));
+
+	plug->mq_list = requeue_list;
+	trace_block_unplug(this_hctx->queue, depth, !from_sched);
+	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+}
+
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
-	struct blk_mq_hw_ctx *this_hctx;
-	struct blk_mq_ctx *this_ctx;
-	struct request *rq;
-	unsigned int depth;
-	LIST_HEAD(list);
-
 	if (rq_list_empty(plug->mq_list))
 		return;
@@ -2603,35 +2626,9 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			return;
 	}
 
-	this_hctx = NULL;
-	this_ctx = NULL;
-	depth = 0;
 	do {
-		rq = rq_list_pop(&plug->mq_list);
-
-		if (!this_hctx) {
-			this_hctx = rq->mq_hctx;
-			this_ctx = rq->mq_ctx;
-		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
-			trace_block_unplug(this_hctx->queue, depth,
-						!from_schedule);
-			blk_mq_sched_insert_requests(this_hctx, this_ctx,
-						&list, from_schedule);
-			depth = 0;
-			this_hctx = rq->mq_hctx;
-			this_ctx = rq->mq_ctx;
-		}
-
-		list_add(&rq->queuelist, &list);
-		depth++;
+		blk_mq_dispatch_plug_list(plug, from_schedule);
 	} while (!rq_list_empty(plug->mq_list));
-
-	if (!list_empty(&list)) {
-		trace_block_unplug(this_hctx->queue, depth, !from_schedule);
-		blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
-					     from_schedule);
-	}
 }
 
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,