blk-mq: make sure active queue usage is held for bio_integrity_prep()

[ Upstream commit b0077e269f ]

blk_integrity_unregister() can be called while a bio with integrity data
prepared is still in flight if the queue usage counter isn't held for that
bio, so the request may be completed by calling profile->complete_fn after
the profile is gone, causing a kernel panic.

Another constraint is that bio_integrity_prep() needs to be called
before bio merge.

Fix the issue by:

- calling bio_integrity_prep() only while a queue usage counter reference
  is reliably held (see the sketch below)

- calling bio_integrity_prep() before attempting any bio merge
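A minimal userspace C sketch of the invariant being enforced (all names here
are hypothetical stand-ins, loosely modeled on bio_queue_enter() /
blk_queue_exit(); this is a toy model, not kernel code): teardown may free
the integrity profile only once the usage counter drains, so a bio that
holds the counter from before integrity preparation until completion can
never see profile->complete_fn called on a freed profile.

	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct profile {
		void (*complete_fn)(void);	/* stands in for profile->complete_fn */
	};

	struct queue {
		int usage;			/* stands in for q->q_usage_counter */
		struct profile *prof;
	};

	static void complete_fn(void)
	{
		puts("complete_fn: profile still alive");
	}

	/* stands in for blk_integrity_unregister(): must wait for usage to drain */
	static void integrity_unregister(struct queue *q)
	{
		assert(q->usage == 0);		/* the usage counter is the guarantee */
		free(q->prof);
		q->prof = NULL;
	}

	/* the fixed ordering: take the reference, then prepare and complete */
	static void submit_and_complete(struct queue *q)
	{
		q->usage++;			/* bio_queue_enter() */
		q->prof->complete_fn();		/* completion sees a live profile */
		q->usage--;			/* blk_queue_exit() at completion */
	}

	int main(void)
	{
		struct queue q = { .usage = 0, .prof = malloc(sizeof(*q.prof)) };

		q.prof->complete_fn = complete_fn;
		submit_and_complete(&q);	/* runs under the usage counter */
		integrity_unregister(&q);	/* can only run after it drains */
		return 0;
	}

In the patched kernel the reference is taken by bio_queue_enter() (or
inherited from a cached plug request) before bio_integrity_prep() runs,
and only dropped again on the failure paths or at request completion,
which is what the diff below implements.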

Fixes: 900e080752 ("block: move queue enter logic into blk_mq_submit_bio()")
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Link: https://lore.kernel.org/r/20231113035231.2708053-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 file changed, 38 insertions(+), 37 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c

@@ -2874,11 +2874,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (unlikely(bio_queue_enter(bio)))
-		return NULL;
-
 	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-		goto queue_exit;
+		return NULL;
 
 	rq_qos_throttle(q, bio);
 
@@ -2894,35 +2891,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-queue_exit:
-	blk_queue_exit(q);
 	return NULL;
 }
 
-static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
-		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
+/* return true if this @rq can be used for @bio */
+static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
+		struct bio *bio)
 {
-	struct request *rq;
-	enum hctx_type type, hctx_type;
+	enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
+	enum hctx_type hctx_type = rq->mq_hctx->type;
 
-	if (!plug)
-		return NULL;
-	rq = rq_list_peek(&plug->cached_rq);
-	if (!rq || rq->q != q)
-		return NULL;
+	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
 
-	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
-		*bio = NULL;
-		return NULL;
-	}
-
-	type = blk_mq_get_hctx_type((*bio)->bi_opf);
-	hctx_type = rq->mq_hctx->type;
 	if (type != hctx_type &&
 	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
-		return NULL;
-	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
-		return NULL;
+		return false;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return false;
 
 	/*
 	 * If any qos ->throttle() end up blocking, we will have flushed the
@@ -2930,12 +2915,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	 * before we throttle.
 	 */
 	plug->cached_rq = rq_list_next(rq);
-	rq_qos_throttle(q, *bio);
+	rq_qos_throttle(rq->q, bio);
 
 	blk_mq_rq_time_init(rq, 0);
-	rq->cmd_flags = (*bio)->bi_opf;
+	rq->cmd_flags = bio->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
-	return rq;
+	return true;
 }
 
 static void bio_set_ioprio(struct bio *bio)
@@ -2965,7 +2950,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct blk_plug *plug = blk_mq_plug(bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct blk_mq_hw_ctx *hctx;
-	struct request *rq;
+	struct request *rq = NULL;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2976,20 +2961,36 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (!bio_integrity_prep(bio))
-		return;
-
 	bio_set_ioprio(bio);
 
-	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
-	if (!rq) {
-		if (!bio)
+	if (plug) {
+		rq = rq_list_peek(&plug->cached_rq);
+		if (rq && rq->q != q)
+			rq = NULL;
+	}
+	if (rq) {
+		if (!bio_integrity_prep(bio))
 			return;
-		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
-		if (unlikely(!rq))
+		if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
 			return;
+		if (blk_mq_can_use_cached_rq(rq, plug, bio))
+			goto done;
+		percpu_ref_get(&q->q_usage_counter);
+	} else {
+		if (unlikely(bio_queue_enter(bio)))
+			return;
+		if (!bio_integrity_prep(bio))
+			goto fail;
+	}
+
+	rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+	if (unlikely(!rq)) {
+fail:
+		blk_queue_exit(q);
+		return;
 	}
 
+done:
 	trace_block_getrq(bio);
 
 	rq_qos_track(q, rq, bio);
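
For reference, a compilable control-flow skeleton of the reworked
blk_mq_submit_bio() shown in the last hunk. The helpers here are
hypothetical stubs so the skeleton builds and runs standalone; only the
branching and the queue-reference pairing mirror the patch:

	#include <stdbool.h>
	#include <stdio.h>

	struct queue { int usage; };
	struct request { struct queue *q; };

	static struct queue queue;
	static struct request cached_rq = { .q = &queue };
	static struct request new_rq = { .q = &queue };

	static struct request *peek_cached_rq(void) { return &cached_rq; }	/* rq_list_peek() */
	static bool integrity_prep(void) { return true; }	/* bio_integrity_prep() */
	static bool attempt_merge(void) { return false; }	/* blk_mq_attempt_bio_merge() */
	static bool can_use_cached_rq(void) { return false; }	/* blk_mq_can_use_cached_rq() */
	static bool queue_enter(struct queue *q) { q->usage++; return true; }	/* bio_queue_enter() */
	static void queue_exit(struct queue *q) { q->usage--; }	/* blk_queue_exit() */
	static struct request *get_new_request(void) { return &new_rq; }	/* blk_mq_get_new_requests() */

	static void submit_bio(void)
	{
		struct request *rq = peek_cached_rq();

		if (rq) {
			/* a cached plug request already holds a queue reference */
			if (!integrity_prep())
				return;
			if (attempt_merge())
				return;
			if (can_use_cached_rq())
				goto done;
			queue_enter(&queue);	/* percpu_ref_get() in the patch */
		} else {
			if (!queue_enter(&queue))	/* grab the counter first */
				return;
			if (!integrity_prep())	/* prep only under the counter */
				goto fail;
		}

		rq = get_new_request();
		if (!rq) {
	fail:
			queue_exit(&queue);	/* balance the reference on failure */
			return;
		}
	done:
		printf("dispatch, usage=%d (released at request completion)\n",
		       queue.usage);
	}

	int main(void)
	{
		submit_bio();
		return 0;
	}

The notable design point is the fail: label inside the allocation-failure
branch: the integrity failure on the no-plug path and the request
allocation failure funnel through a single blk_queue_exit(), so the usage
counter is balanced on every error path while bio_integrity_prep() always
runs with the counter held.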