block: pass a queue_limits argument to blk_alloc_queue

Pass a queue_limits to blk_alloc_queue and apply it after validating and
capping the values using blk_validate_limits.  This will allow allocating
queues with valid queue limits instead of setting the values one at a
time later.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20240213073425.1621680-9-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig 2024-02-13 08:34:18 +01:00 committed by Jens Axboe
parent ff956a3be9
commit ad751ba1f8
4 changed files with 26 additions and 14 deletions
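
For illustration, a minimal caller sketch (hypothetical driver code, not part of this patch) of the new calling convention: the caller fills in a queue_limits on the stack and checks the ERR_PTR-encoded return value instead of comparing against NULL:

	struct queue_limits lim = {
		.logical_block_size	= 512,	/* example value; unset fields fall back to defaults */
	};
	struct request_queue *q;

	q = blk_alloc_queue(&lim, NUMA_NO_NODE);
	if (IS_ERR(q))
		return PTR_ERR(q);

Callers that used to map a NULL return to -ENOMEM can now propagate the specific error, as the blk_mq_init_queue_data() hunk below does.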

block/blk-core.c

@@ -394,24 +394,34 @@ static void blk_timeout_work(struct work_struct *work)
 {
 }
 
-struct request_queue *blk_alloc_queue(int node_id)
+struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 {
 	struct request_queue *q;
+	int error;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
 				  node_id);
 	if (!q)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	q->last_merge = NULL;
 
 	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
-	if (q->id < 0)
+	if (q->id < 0) {
+		error = q->id;
 		goto fail_q;
+	}
 
 	q->stats = blk_alloc_queue_stats();
-	if (!q->stats)
+	if (!q->stats) {
+		error = -ENOMEM;
 		goto fail_id;
+	}
+
+	error = blk_set_default_limits(lim);
+	if (error)
+		goto fail_stats;
+	q->limits = *lim;
 
 	q->node = node_id;
@@ -436,12 +446,12 @@ struct request_queue *blk_alloc_queue(int node_id)
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
 	 */
-	if (percpu_ref_init(&q->q_usage_counter,
+	error = percpu_ref_init(&q->q_usage_counter,
 				blk_queue_usage_counter_release,
-				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
+				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
+	if (error)
 		goto fail_stats;
 
-	blk_set_default_limits(&q->limits);
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
 
 	return q;
@@ -452,7 +462,7 @@ fail_id:
 	ida_free(&blk_queue_ida, q->id);
 fail_q:
 	kmem_cache_free(blk_requestq_cachep, q);
-	return NULL;
+	return ERR_PTR(error);
 }
 
 /**

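The blk-core.c change above only copies the caller-supplied limits into q->limits after blk_set_default_limits() has accepted them. As a rough sketch of the validate-and-cap idea the commit message attributes to blk_validate_limits (an illustration with made-up logic, not the kernel's actual implementation):

	/* Illustration only: default zeroed fields, cap or reject bad values. */
	static int example_validate_limits(struct queue_limits *lim)
	{
		if (!lim->logical_block_size)
			lim->logical_block_size = SECTOR_SIZE;
		if (lim->logical_block_size > PAGE_SIZE)
			return -EINVAL;
		if (!lim->max_hw_sectors)
			lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
		if (!lim->max_sectors || lim->max_sectors > lim->max_hw_sectors)
			lim->max_sectors = lim->max_hw_sectors;
		return 0;
	}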
block/blk-mq.c

@@ -4086,12 +4086,13 @@ void blk_mq_release(struct request_queue *q)
 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 		void *queuedata)
 {
+	struct queue_limits lim = { };
 	struct request_queue *q;
 	int ret;
 
-	q = blk_alloc_queue(set->numa_node);
-	if (!q)
-		return ERR_PTR(-ENOMEM);
+	q = blk_alloc_queue(&lim, set->numa_node);
+	if (IS_ERR(q))
+		return q;
 	q->queuedata = queuedata;
 	ret = blk_mq_init_allocated_queue(set, q);
 	if (ret) {

block/blk.h

@@ -448,7 +448,7 @@ static inline void bio_release_page(struct bio *bio, struct page *page)
 		unpin_user_page(page);
 }
 
-struct request_queue *blk_alloc_queue(int node_id);
+struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);
 
 int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

block/genhd.c

@@ -1393,11 +1393,12 @@ out_free_disk:
 
 struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
 {
+	struct queue_limits lim = { };
 	struct request_queue *q;
 	struct gendisk *disk;
 
-	q = blk_alloc_queue(node);
-	if (!q)
+	q = blk_alloc_queue(&lim, node);
+	if (IS_ERR(q))
 		return NULL;
 
 	disk = __alloc_disk_node(q, node, lkclass);