diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 1b2829e99dad..7d9b15f0dbd5 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -316,14 +316,12 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-	bfqg->ref++;
+	refcount_inc(&bfqg->ref);
 }
 
 static void bfqg_put(struct bfq_group *bfqg)
 {
-	bfqg->ref--;
-
-	if (bfqg->ref == 0)
+	if (refcount_dec_and_test(&bfqg->ref))
 		kfree(bfqg);
 }
 
@@ -530,7 +528,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
 	}
 
 	/* see comments in bfq_bic_update_cgroup for why refcounting */
-	bfqg_get(bfqg);
+	refcount_set(&bfqg->ref, 1);
 	return &bfqg->pd;
 }
 
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 41aa151ccc22..466e4865ace6 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -928,7 +928,7 @@ struct bfq_group {
 	char blkg_path[128];
 
 	/* reference counter (see comments in bfq_bic_update_cgroup) */
-	int ref;
+	refcount_t ref;
 
 	/* Is bfq_group still online? */
 	bool online;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ce6a2b7d3dfb..4c94a6560f62 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1455,6 +1455,10 @@ int blkcg_activate_policy(struct request_queue *q,
 		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
 			pol->pd_init_fn(blkg->pd[pol->plid]);
 
+	if (pol->pd_online_fn)
+		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+			pol->pd_online_fn(blkg->pd[pol->plid]);
+
 	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2c49b4151da1..9d463f7563bc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2890,6 +2890,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
 	struct request *rq;
+	enum hctx_type type, hctx_type;
 
 	if (!plug)
 		return NULL;
@@ -2902,7 +2903,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		return NULL;
 	}
 
-	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+	type = blk_mq_get_hctx_type((*bio)->bi_opf);
+	hctx_type = rq->mq_hctx->type;
+	if (type != hctx_type &&
+	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
 		return NULL;
 	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
 		return NULL;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 4cea3b08087e..2f1a92509271 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2400,6 +2400,8 @@ static void pkt_submit_bio(struct bio *bio)
 	struct bio *split;
 
 	bio = bio_split_to_limits(bio);
+	if (!bio)
+		return;
 
 	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
 		(unsigned long long)bio->bi_iter.bi_sector,
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 78334da74d8b..5eb8c7855970 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1440,7 +1440,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
 		goto out_alloc;
 	}
 
-	ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
+	ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
 			    GFP_KERNEL);
 	if (ret < 0) {
 		pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8af639296b3c..02b0240e7c71 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3644,7 +3644,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
  */
 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
 {
-	static struct md_rdev *claim_rdev; /* just for claiming the bdev */
+	static struct md_rdev claim_rdev; /* just for claiming the bdev */
 	struct md_rdev *rdev;
 	sector_t size;
 	int err;
@@ -3662,7 +3662,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 	rdev->bdev = blkdev_get_by_dev(newdev,
 			FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-			super_format == -2 ? claim_rdev : rdev);
+			super_format == -2 ? &claim_rdev : rdev);
 	if (IS_ERR(rdev->bdev)) {
 		pr_warn("md: could not open device unknown-block(%u,%u).\n",
 			MAJOR(newdev), MINOR(newdev));
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index bf1c60edb7f9..b317ce6c4ec3 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -829,7 +829,23 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
 			apple_nvme_remove_cq(anv);
 		}
 
-		nvme_disable_ctrl(&anv->ctrl, shutdown);
+		/*
+		 * Always disable the NVMe controller after shutdown.
+		 * We need to do this to bring it back up later anyway, and we
+		 * can't do it while the firmware is not running (e.g. in the
+		 * resume reset path before RTKit is initialized), so for Apple
+		 * controllers it makes sense to unconditionally do it here.
+		 * Additionally, this sequence of events is reliable, while
+		 * others (like disabling after bringing back the firmware on
+		 * resume) seem to run into trouble under some circumstances.
+		 *
+		 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
+		 * NVMe controller is handed off with firmware shut down, in an
+		 * NVMe disabled state, after a clean shutdown).
+		 */
+		if (shutdown)
+			nvme_disable_ctrl(&anv->ctrl, shutdown);
+		nvme_disable_ctrl(&anv->ctrl, false);
 	}
 
 	WRITE_ONCE(anv->ioq.enabled, false);
@@ -985,11 +1001,11 @@ static void apple_nvme_reset_work(struct work_struct *work)
 		goto out;
 	}
 
-	if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
-		apple_nvme_disable(anv, false);
-
 	/* RTKit must be shut down cleanly for the (soft)-reset to work */
 	if (apple_rtkit_is_running(anv->rtk)) {
+		/* reset the controller if it is enabled */
+		if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+			apple_nvme_disable(anv, false);
 		dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
 		ret = apple_rtkit_shutdown(anv->rtk);
 		if (ret)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a2553b7d9bb8..1ff8843bc4b3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1362,7 +1362,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	else
 		nvme_poll_irqdisable(nvmeq);
 
-	if (blk_mq_request_completed(req)) {
+	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, completion polled\n",
 			 req->tag, nvmeq->qid);
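
Note on the bfq-cgroup hunks above: the open-coded "bfqg->ref--; if (bfqg->ref == 0)" pair is two separate memory operations, so two concurrent bfqg_put() callers can race (both miss zero and leak, or both observe zero and double-free). refcount_dec_and_test() folds the decrement and the zero test into a single atomic operation, and refcount_t additionally saturates and warns on underflow/overflow instead of silently wrapping. The following is a minimal userspace sketch of the same pattern using C11 stdatomic, not the kernel API; struct obj and obj_get()/obj_put() are hypothetical stand-ins.

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int ref;			/* stands in for refcount_t */
};

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_init(&o->ref, 1);	/* like refcount_set(&ref, 1) */
	return o;
}

static void obj_get(struct obj *o)
{
	/* like refcount_inc(): a bare atomic increment, no zero test */
	atomic_fetch_add_explicit(&o->ref, 1, memory_order_relaxed);
}

static void obj_put(struct obj *o)
{
	/*
	 * Like refcount_dec_and_test(): decrement and test are one atomic
	 * operation, so exactly one caller sees the 1 -> 0 transition
	 * and becomes responsible for freeing.
	 */
	if (atomic_fetch_sub_explicit(&o->ref, 1, memory_order_acq_rel) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (!o)
		return 1;
	obj_get(o);	/* take a second reference */
	obj_put(o);	/* drops to 1 */
	obj_put(o);	/* 1 -> 0, frees */
	return 0;
}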
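Likewise for the rnbd-clt hunk: ida_alloc_max() treats its max argument as inclusive, so an index space of 2^(MINORBITS - RNBD_PART_BITS) slots needs the bound (1 << (MINORBITS - RNBD_PART_BITS)) - 1. The old bound admitted one extra index, which overflows the minor-number space once the index is shifted left by RNBD_PART_BITS to form a first_minor. A standalone check of the arithmetic, assuming the in-tree values MINORBITS = 20 and RNBD_PART_BITS = 6 (verify against the tree you build):

#include <assert.h>

#define MINORBITS	20	/* assumed, from include/linux/kdev_t.h */
#define RNBD_PART_BITS	6	/* assumed rnbd value */

int main(void)
{
	int max_idx = (1 << (MINORBITS - RNBD_PART_BITS)) - 1;

	/* the largest minor derived from a valid index still fits */
	assert(((max_idx << RNBD_PART_BITS) | ((1 << RNBD_PART_BITS) - 1))
	       < (1 << MINORBITS));
	/* the old bound's one extra index does not */
	assert(((max_idx + 1) << RNBD_PART_BITS) >= (1 << MINORBITS));
	return 0;
}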