blk-mq: Add blk_mq_alloc_map_and_rqs()

Add a function to combine allocating tags and the associated requests,
and factor out common patterns to use this new function.

Some functions only call blk_mq_alloc_map_and_rqs() now, but more
functionality will be added later.

Also make blk_mq_alloc_rq_map() and blk_mq_alloc_rqs() static since they
are only used in blk-mq.c, and finally rename some functions for
conciseness and consistency with other function names:
- __blk_mq_alloc_map_and_{request -> rqs}()
- blk_mq_alloc_{map_and_requests -> set_map_and_rqs}()

Suggested-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/1633429419-228500-11-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
John Garry 2021-10-05 18:23:35 +08:00 committed by Jens Axboe
parent a7e7388dce
commit 63064be150
4 changed files with 42 additions and 53 deletions

View File

@ -519,21 +519,12 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
struct blk_mq_hw_ctx *hctx, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx) unsigned int hctx_idx)
{ {
struct blk_mq_tag_set *set = q->tag_set; hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
int ret; q->nr_requests);
hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
set->reserved_tags, set->flags);
if (!hctx->sched_tags) if (!hctx->sched_tags)
return -ENOMEM; return -ENOMEM;
return 0;
ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
if (ret) {
blk_mq_free_rq_map(hctx->sched_tags, set->flags);
hctx->sched_tags = NULL;
}
return ret;
} }
/* called in queue's release handler, tagset has gone away */ /* called in queue's release handler, tagset has gone away */

View File

@ -592,7 +592,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > tags->nr_tags) { if (tdepth > tags->nr_tags) {
struct blk_mq_tag_set *set = hctx->queue->tag_set; struct blk_mq_tag_set *set = hctx->queue->tag_set;
struct blk_mq_tags *new; struct blk_mq_tags *new;
bool ret;
if (!can_grow) if (!can_grow)
return -EINVAL; return -EINVAL;
@ -604,15 +603,9 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > MAX_SCHED_RQ) if (tdepth > MAX_SCHED_RQ)
return -EINVAL; return -EINVAL;
new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
tags->nr_reserved_tags, set->flags);
if (!new) if (!new)
return -ENOMEM; return -ENOMEM;
ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
if (ret) {
blk_mq_free_rq_map(new, set->flags);
return -ENOMEM;
}
blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
blk_mq_free_rq_map(*tagsptr, set->flags); blk_mq_free_rq_map(*tagsptr, set->flags);

View File

@ -2392,11 +2392,11 @@ void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
blk_mq_free_tags(tags, flags); blk_mq_free_tags(tags, flags);
} }
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
unsigned int hctx_idx, unsigned int hctx_idx,
unsigned int nr_tags, unsigned int nr_tags,
unsigned int reserved_tags, unsigned int reserved_tags,
unsigned int flags) unsigned int flags)
{ {
struct blk_mq_tags *tags; struct blk_mq_tags *tags;
int node; int node;
@ -2444,8 +2444,9 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
return 0; return 0;
} }
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
unsigned int hctx_idx, unsigned int depth) struct blk_mq_tags *tags,
unsigned int hctx_idx, unsigned int depth)
{ {
unsigned int i, j, entries_per_page, max_order = 4; unsigned int i, j, entries_per_page, max_order = 4;
size_t rq_size, left; size_t rq_size, left;
@ -2856,25 +2857,34 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
} }
} }
static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set, struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
int hctx_idx) unsigned int hctx_idx,
unsigned int depth)
{ {
unsigned int flags = set->flags; struct blk_mq_tags *tags;
int ret = 0; int ret;
set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags,
set->queue_depth, set->reserved_tags, flags); set->flags);
if (!set->tags[hctx_idx]) if (!tags)
return false; return NULL;
ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
set->queue_depth); if (ret) {
if (!ret) blk_mq_free_rq_map(tags, set->flags);
return true; return NULL;
}
blk_mq_free_rq_map(set->tags[hctx_idx], flags); return tags;
set->tags[hctx_idx] = NULL; }
return false;
static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
int hctx_idx)
{
set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
set->queue_depth);
return set->tags[hctx_idx];
} }
static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
@ -2919,7 +2929,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
hctx_idx = set->map[j].mq_map[i]; hctx_idx = set->map[j].mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */ /* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] && if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_map_and_request(set, hctx_idx)) { !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
/* /*
* If tags initialization fail for some hctx, * If tags initialization fail for some hctx,
* that hctx won't be brought online. In this * that hctx won't be brought online. In this
@ -3352,7 +3362,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
int i; int i;
for (i = 0; i < set->nr_hw_queues; i++) { for (i = 0; i < set->nr_hw_queues; i++) {
if (!__blk_mq_alloc_map_and_request(set, i)) if (!__blk_mq_alloc_map_and_rqs(set, i))
goto out_unwind; goto out_unwind;
cond_resched(); cond_resched();
} }
@ -3371,7 +3381,7 @@ out_unwind:
* may reduce the depth asked for, if memory is tight. set->queue_depth * may reduce the depth asked for, if memory is tight. set->queue_depth
* will be updated to reflect the allocated depth. * will be updated to reflect the allocated depth.
*/ */
static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set) static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{ {
unsigned int depth; unsigned int depth;
int err; int err;
@ -3537,7 +3547,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (ret) if (ret)
goto out_free_mq_map; goto out_free_mq_map;
ret = blk_mq_alloc_map_and_requests(set); ret = blk_mq_alloc_set_map_and_rqs(set);
if (ret) if (ret)
goto out_free_mq_map; goto out_free_mq_map;

View File

@ -55,13 +55,8 @@ void blk_mq_put_rq_ref(struct request *rq);
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx); unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags); void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
unsigned int hctx_idx, unsigned int hctx_idx, unsigned int depth);
unsigned int nr_tags,
unsigned int reserved_tags,
unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx, unsigned int depth);
/* /*
* Internal helpers for request insertion into sw queues * Internal helpers for request insertion into sw queues