block: pass a queue_limits argument to blk_mq_init_queue

Pass a queue_limits to blk_mq_init_queue and apply it if non-NULL.  This
will allow allocating queues with valid queue limits instead of setting
the values one at a time later.

Also rename the function to blk_mq_alloc_queue, as that is a much better
name for a function that allocates a queue, and always pass the queuedata
argument instead of having a separate version for the extra argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20240213073425.1621680-10-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Christoph Hellwig <hch@lst.de>, 2024-02-13 08:34:19 +01:00 (committed by Jens Axboe)
parent ad751ba1f8
commit 9ac4dd8c47
7 changed files with 17 additions and 21 deletions
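
For illustration only (not part of the commit): a minimal sketch of how a caller
might use the renamed interface after this change, passing pre-filled queue
limits and its private queuedata in a single call. The helper name
example_driver_alloc_queue, the drv argument, and the limit values are
hypothetical; passing a NULL limits pointer falls back to a default-initialized
queue_limits, as the blk-mq hunk below shows.

/*
 * Illustration only: a hypothetical driver helper that fills in its
 * queue_limits up front and hands them to blk_mq_alloc_queue() together
 * with its private queuedata, instead of allocating the queue first and
 * setting the limits one at a time afterwards.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static struct request_queue *example_driver_alloc_queue(struct blk_mq_tag_set *set,
                void *drv)
{
        struct queue_limits lim = {
                .logical_block_size     = 4096, /* example values only */
                .max_hw_sectors         = 256,
        };

        /* Returns the new queue or an ERR_PTR(); lim may also be NULL. */
        return blk_mq_alloc_queue(set, &lim, drv);
}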

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4083,14 +4083,14 @@ void blk_mq_release(struct request_queue *q)
         blk_mq_sysfs_deinit(q);
 }
 
-static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
-                void *queuedata)
+struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+                struct queue_limits *lim, void *queuedata)
 {
-        struct queue_limits lim = { };
+        struct queue_limits default_lim = { };
         struct request_queue *q;
         int ret;
 
-        q = blk_alloc_queue(&lim, set->numa_node);
+        q = blk_alloc_queue(lim ? lim : &default_lim, set->numa_node);
         if (IS_ERR(q))
                 return q;
         q->queuedata = queuedata;
@@ -4101,20 +4101,15 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
         }
         return q;
 }
-
-struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
-{
-        return blk_mq_init_queue_data(set, NULL);
-}
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_alloc_queue);
 
 /**
  * blk_mq_destroy_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * This shuts down a request queue allocated by blk_mq_init_queue(). All future
+ * This shuts down a request queue allocated by blk_mq_alloc_queue(). All future
  * requests will be failed with -ENODEV. The caller is responsible for dropping
- * the reference from blk_mq_init_queue() by calling blk_put_queue().
+ * the reference from blk_mq_alloc_queue() by calling blk_put_queue().
  *
  * Context: can sleep
  */
@@ -4141,7 +4136,7 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
         struct request_queue *q;
         struct gendisk *disk;
 
-        q = blk_mq_init_queue_data(set, queuedata);
+        q = blk_mq_alloc_queue(set, NULL, queuedata);
         if (IS_ERR(q))
                 return ERR_CAST(q);

--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -383,7 +383,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
         if (blk_mq_alloc_tag_set(set))
                 goto out_tag_set;
 
-        q = blk_mq_init_queue(set);
+        q = blk_mq_alloc_queue(set, NULL, NULL);
         if (IS_ERR(q)) {
                 ret = PTR_ERR(q);
                 goto out_queue;

--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1516,7 +1516,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
                 goto put_dev;
         }
 
-        anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+        anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
         if (IS_ERR(anv->ctrl.admin_q)) {
                 ret = -ENOMEM;
                 goto put_dev;

--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4372,14 +4372,14 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
         if (ret)
                 return ret;
 
-        ctrl->admin_q = blk_mq_init_queue(set);
+        ctrl->admin_q = blk_mq_alloc_queue(set, NULL, NULL);
         if (IS_ERR(ctrl->admin_q)) {
                 ret = PTR_ERR(ctrl->admin_q);
                 goto out_free_tagset;
         }
 
         if (ctrl->ops->flags & NVME_F_FABRICS) {
-                ctrl->fabrics_q = blk_mq_init_queue(set);
+                ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL);
                 if (IS_ERR(ctrl->fabrics_q)) {
                         ret = PTR_ERR(ctrl->fabrics_q);
                         goto out_cleanup_admin_q;
@@ -4443,7 +4443,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
                 return ret;
 
         if (ctrl->ops->flags & NVME_F_FABRICS) {
-                ctrl->connect_q = blk_mq_init_queue(set);
+                ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL);
                 if (IS_ERR(ctrl->connect_q)) {
                         ret = PTR_ERR(ctrl->connect_q);
                         goto out_free_tag_set;

--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -332,7 +332,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
         sdev->sg_reserved_size = INT_MAX;
 
-        q = blk_mq_init_queue(&sdev->host->tag_set);
+        q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL);
         if (IS_ERR(q)) {
                 /* release fn is set up in scsi_sysfs_device_initialise, so
                  * have to free and put manually here */

--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -10592,7 +10592,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
         err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
         if (err < 0)
                 goto out_remove_scsi_host;
-        hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+        hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
         if (IS_ERR(hba->tmf_queue)) {
                 err = PTR_ERR(hba->tmf_queue);
                 goto free_tmf_tag_set;

--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -692,7 +692,8 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
 })
 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
                 struct lock_class_key *lkclass);
-struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+                struct queue_limits *lim, void *queuedata);
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                 struct request_queue *q);
 void blk_mq_destroy_queue(struct request_queue *);