From 3c08b0931eedd04c530040499fadeccab50ed646 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 14 Oct 2021 13:20:22 -1000 Subject: [PATCH 001/174] blk-cgroup: blk_cgroup_bio_start() should use irq-safe operations on blkg->iostat_cpu c3df5fb57fe8 ("cgroup: rstat: fix A-A deadlock on 32bit around u64_stats_sync") made u64_stats updates irq-safe to avoid A-A deadlocks. Unfortunately, the conversion missed one in blk_cgroup_bio_start(). Fix it. Fixes: 2d146aa3aa84 ("mm: memcontrol: switch to rstat") Cc: stable@vger.kernel.org # v5.13+ Reported-by: syzbot+9738c8815b375ce482a1@syzkaller.appspotmail.com Signed-off-by: Tejun Heo Link: https://lore.kernel.org/r/YWi7NrQdVlxD6J9W@slm.duckdns.org Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 38b9f7684952..9a1c5839dd46 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1897,10 +1897,11 @@ void blk_cgroup_bio_start(struct bio *bio) { int rwd = blk_cgroup_io_type(bio), cpu; struct blkg_iostat_set *bis; + unsigned long flags; cpu = get_cpu(); bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); - u64_stats_update_begin(&bis->sync); + flags = u64_stats_update_begin_irqsave(&bis->sync); /* * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split @@ -1912,7 +1913,7 @@ void blk_cgroup_bio_start(struct bio *bio) } bis->cur.ios[rwd]++; - u64_stats_update_end(&bis->sync); + u64_stats_update_end_irqrestore(&bis->sync, flags); if (cgroup_subsys_on_dfl(io_cgrp_subsys)) cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); put_cpu(); From 348332e000697b4ca82ef96719e02876434b8346 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:12 +0200 Subject: [PATCH 002/174] mm: don't include <linux/blk-cgroup.h> in <linux/writeback.h> blk-cgroup.h pulls in blkdev.h and thus pretty much all the block headers. Break this dependency chain by turning wbc_blkcg_css into a macro and dropping the blk-cgroup.h include.
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-2-hch@lst.de Signed-off-by: Jens Axboe --- drivers/gpu/drm/i915/i915_utils.h | 1 + fs/btrfs/inode.c | 1 + fs/quota/quota.c | 1 + include/linux/writeback.h | 14 +++++--------- lib/random32.c | 1 + 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 5259edacde38..066a9118c374 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -30,6 +30,7 @@ #include #include #include +#include struct drm_i915_private; struct timer_list; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 487533c35ddb..4a9077c52444 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 2bcc9a6f1bfc..052f143e2e0e 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d1f65adf6a26..8eb165760752 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -11,7 +11,6 @@ #include #include #include -#include <linux/blk-cgroup.h> struct bio; @@ -109,15 +108,12 @@ static inline int wbc_to_write_flags(struct writeback_control *wbc) return flags; } -static inline struct cgroup_subsys_state * -wbc_blkcg_css(struct writeback_control *wbc) -{ #ifdef CONFIG_CGROUP_WRITEBACK - if (wbc->wb) - return wbc->wb->blkcg_css; -#endif - return blkcg_root_css; -} +#define wbc_blkcg_css(wbc) \ + ((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css) +#else +#define wbc_blkcg_css(wbc) (blkcg_root_css) +#endif /* CONFIG_CGROUP_WRITEBACK */ /* * A wb_domain represents a domain that wb's (bdi_writeback's) belong to diff --git a/lib/random32.c b/lib/random32.c index 4d0e05e471d7..a57a0e18819d 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include From e41d12f539f7c8bac9b0897031fee6cc9158a6be Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:13 +0200 Subject: [PATCH 003/174] mm: don't include <linux/blk-cgroup.h> in <linux/backing-dev.h> There is no need to pull blk-cgroup.h and thus blkdev.h in here, so break the include chain.
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-3-hch@lst.de Signed-off-by: Jens Axboe --- arch/powerpc/platforms/cell/spufs/inode.c | 1 + block/blk-iolatency.c | 1 + block/blk-mq.c | 1 + block/bounce.c | 1 + drivers/md/dm-ima.c | 1 + fs/btrfs/compression.c | 1 + fs/btrfs/ctree.c | 1 + fs/f2fs/compress.c | 1 + fs/orangefs/super.c | 1 + fs/ramfs/inode.c | 1 + include/linux/backing-dev.h | 3 ++- mm/backing-dev.c | 3 ++- mm/swapfile.c | 2 +- 13 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index bed05b644c2c..cb25acccd746 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index c0545f9da549..6593c7123b97 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -74,6 +74,7 @@ #include #include #include +#include #include "blk-rq-qos.h" #include "blk-stat.h" #include "blk.h" diff --git a/block/blk-mq.c b/block/blk-mq.c index bc026372de43..0fefe00cebd4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/block/bounce.c b/block/bounce.c index 05fc7148489d..7af1a72835b9 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c index 2c5edfbd7711..957999998d70 100644 --- a/drivers/md/dm-ima.c +++ b/drivers/md/dm-ima.c @@ -12,6 +12,7 @@ #include "dm-ima.h" #include +#include #include #include #include diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 7869ad12bc6e..ddc4f5436cc9 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84627cbd5b5b..66290b214f2b 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index c1bf9ad4c220..20a083dc9042 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 2f2e430461b2..8bb0a53a658b 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -11,6 +11,7 @@ #include #include +#include /* a cache for orangefs-inode objects (i.e. 
orangefs inode private data) */ static struct kmem_cache *orangefs_inode_cache; diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 65e7e56005b8..e2302342a67f 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -38,6 +38,7 @@ #include #include #include +#include #include "internal.h" struct ramfs_mount_opts { diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index ac7f231b8825..843bf4be675f 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -15,10 +15,11 @@ #include #include #include -#include <linux/blk-cgroup.h> #include #include +struct blkcg; + static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) { kref_get(&bdi->refcnt); diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 4a9d4e27d0d9..5245744437dc 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -2,8 +2,9 @@ #include #include -#include #include +#include +#include #include #include #include diff --git a/mm/swapfile.c b/mm/swapfile.c index 22d10f713848..30db362749c0 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include From ccdf774189b6466457ca9c7de1fe9ed18547d249 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:14 +0200 Subject: [PATCH 004/174] mm: don't include <linux/blkdev.h> in <linux/backing-dev.h> Move inode_to_bdi out of line to avoid having to include blkdev.h. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-4-hch@lst.de Signed-off-by: Jens Axboe --- drivers/mtd/mtdsuper.c | 1 + fs/ntfs/file.c | 1 + fs/ntfs3/file.c | 1 + fs/orangefs/inode.c | 2 +- include/linux/backing-dev.h | 16 +--------------- mm/backing-dev.c | 16 ++++++++++++++++ 6 files changed, 21 insertions(+), 16 deletions(-) diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index 38b6aa849c63..5ff001140ef4 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "mtdcore.h" diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index ab4f3362466d..373dbb627657 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -5,6 +5,7 @@ * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc. */ +#include #include #include #include diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 43b1451bff53..a3cd3c3f091e 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index c1bb4c4b5d67..e5e3e500ed46 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -10,7 +10,7 @@ * Linux VFS inode operations.
*/ -#include +#include #include "protocol.h" #include "orangefs-kernel.h" diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 843bf4be675f..4ac7ce096013 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -12,7 +12,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include #include @@ -134,20 +133,7 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb) return test_bit(WB_writeback_running, &wb->state); } -static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) -{ - struct super_block *sb; - - if (!inode) - return &noop_backing_dev_info; - - sb = inode->i_sb; -#ifdef CONFIG_BLOCK - if (sb_is_blkdev_sb(sb)) - return I_BDEV(inode)->bd_disk->bdi; -#endif - return sb->s_bdi; -} +struct backing_dev_info *inode_to_bdi(struct inode *inode); static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) { diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 5245744437dc..c878d995af06 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -978,6 +978,22 @@ void bdi_put(struct backing_dev_info *bdi) } EXPORT_SYMBOL(bdi_put); +struct backing_dev_info *inode_to_bdi(struct inode *inode) +{ + struct super_block *sb; + + if (!inode) + return &noop_backing_dev_info; + + sb = inode->i_sb; +#ifdef CONFIG_BLOCK + if (sb_is_blkdev_sb(sb)) + return I_BDEV(inode)->bd_disk->bdi; +#endif + return sb->s_bdi; +} +EXPORT_SYMBOL(inode_to_bdi); + const char *bdi_dev_name(struct backing_dev_info *bdi) { if (!bdi || !bdi->dev) From 518d55051a8c368f1ba8ba1bed837a582f27a584 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:15 +0200 Subject: [PATCH 005/174] mm: remove spurious blkdev.h includes Various files have acquired spurious includes of <linux/blkdev.h> over time. Remove them.
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-5-hch@lst.de Signed-off-by: Jens Axboe --- mm/filemap.c | 1 - mm/highmem.c | 1 - mm/mempool.c | 1 - mm/nommu.c | 1 - mm/readahead.c | 1 - mm/shmem.c | 1 - 6 files changed, 6 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index dae481293b5d..44b4b551e430 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -30,7 +30,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include #include diff --git a/mm/highmem.c b/mm/highmem.c index 4212ad0e4a19..471d9779a7f4 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -23,7 +23,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include #include diff --git a/mm/mempool.c b/mm/mempool.c index 0b8afbec3e35..b933d0fc21b8 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -17,7 +17,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include "slab.h" diff --git a/mm/nommu.c b/mm/nommu.c index 02d2427b8f9e..41ef204e7482 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -27,7 +27,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include #include diff --git a/mm/readahead.c b/mm/readahead.c index 41b75d76d36e..e71e719e36c9 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -12,7 +12,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include #include diff --git a/mm/shmem.c b/mm/shmem.c index b5860f4a2738..2ca4a341652c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -59,7 +59,6 @@ static struct vfsmount *shm_mnt; #include #include #include -#include <linux/blkdev.h> #include #include #include From dcbfa221b57b6ddf794c6dfc3fd7813ede368052 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:16 +0200 Subject: [PATCH 006/174] arch: remove spurious blkdev.h includes Various files have acquired spurious includes of <linux/blkdev.h> over time. Remove them. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-6-hch@lst.de Signed-off-by: Jens Axboe --- arch/mips/rb532/prom.c | 1 - arch/mips/sibyte/common/cfe.c | 1 - arch/mips/sibyte/swarm/setup.c | 1 - arch/openrisc/mm/init.c | 1 - 4 files changed, 4 deletions(-) diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c index 23ad8dd9aa5e..b11693715547 100644 --- a/arch/mips/rb532/prom.c +++ b/arch/mips/rb532/prom.c @@ -16,7 +16,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c index a3323f8dcc1b..1a504294d85f 100644 --- a/arch/mips/sibyte/common/cfe.c +++ b/arch/mips/sibyte/common/cfe.c @@ -7,7 +7,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include #include diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 538a2791b48c..f07b15dd1c1a 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -11,7 +11,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include #include diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index cfef61a7b6c2..97305bde1b16 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -25,7 +25,6 @@ #include #include #include -#include <linux/blkdev.h> /* for initrd_* */ #include #include From 545c6647d2d9f3bf8a086d5ff47fb85e5c5dca28 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:17 +0200 Subject: [PATCH 007/174] kernel: remove spurious blkdev.h includes Various files have acquired spurious includes of <linux/blkdev.h> over time. Remove them.
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-7-hch@lst.de Signed-off-by: Jens Axboe --- kernel/acct.c | 1 - kernel/exit.c | 1 - kernel/fork.c | 1 - 3 files changed, 3 deletions(-) diff --git a/kernel/acct.c b/kernel/acct.c index 23a7ab8e6cbc..3df53cf1dcd5 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -60,7 +60,6 @@ #include #include -#include <linux/blkdev.h> /* sector_div */ #include #include diff --git a/kernel/exit.c b/kernel/exit.c index 91a43e57a32e..a53863dafb3d 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -48,7 +48,6 @@ #include #include /* for audit_free() */ #include -#include <linux/blkdev.h> #include #include #include diff --git a/kernel/fork.c b/kernel/fork.c index 38681ad44c76..67679e3933f2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -76,7 +76,6 @@ #include #include #include -#include <linux/blkdev.h> #include #include #include From 6a5850d12977eb0a814c4977da389cebdc558863 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:18 +0200 Subject: [PATCH 008/174] sched: move the <linux/blkdev.h> include out of kernel/sched/sched.h Only core.c needs blkdev.h, so move the #include statement there. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-8-hch@lst.de Signed-off-by: Jens Axboe --- kernel/sched/core.c | 2 +- kernel/sched/sched.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1bba4128a3e6..92ef7b68198c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -13,7 +13,7 @@ #include "sched.h" #include - +#include <linux/blkdev.h> #include #include diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3d3e5793e117..66128dfc05fb 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -37,7 +37,6 @@ #include #include -#include <linux/blkdev.h> #include #include #include From 1d9433cdd04adf7cfd5e3ef81f5acb29d80aae9b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:19 +0200 Subject: [PATCH 009/174] block: remove the unused rq_end_sector macro Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-9-hch@lst.de Signed-off-by: Jens Axboe --- drivers/md/dm-rq.c | 1 - include/linux/elevator.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index a896dea9750e..579ab6183d4d 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -7,7 +7,6 @@ #include "dm-core.h" #include "dm-rq.h" -#include <linux/elevator.h> /* for rq_end_sector() */ #include #define DM_MSG_PREFIX "core-rq" diff --git a/include/linux/elevator.h b/include/linux/elevator.h index ef9ceead3db1..80633f3b600c 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -162,7 +162,6 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define ELEVATOR_INSERT_FLUSH 5 #define ELEVATOR_INSERT_SORT_MERGE 6 -#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) From 90138237a56282f99d71130ab7c68903a2eba5a7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:20 +0200 Subject: [PATCH 010/174] block: remove the unused blk_queue_state enum Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-10-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 5 ----- 1 file changed, 5
deletions(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 12b9dbcc980e..9e367509bea1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -262,11 +262,6 @@ enum blk_eh_timer_return { BLK_EH_RESET_TIMER, /* reset timer and try again */ }; -enum blk_queue_state { - Queue_down, - Queue_up, -}; - #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ From 713e4e11088875bf7aee3df81b167c8b2f6c5d65 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:21 +0200 Subject: [PATCH 011/174] block: remove the cmd_size field from struct request_queue Entirely unused. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-11-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9e367509bea1..6737e484280d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -544,8 +544,6 @@ struct request_queue { bool mq_sysfs_init_done; - size_t cmd_size; - #define BLK_MAX_WRITE_HINTS 5 u64 write_hints[BLK_MAX_WRITE_HINTS]; }; From 9778ac77c2027827ffdbb33d3e936b3a0ae9f0f9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:22 +0200 Subject: [PATCH 012/174] block: remove the struct blk_queue_ctx forward declaration This type doesn't exist at all, so no need to forward declare it. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-12-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6737e484280d..4bcbb1ae2d66 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -253,8 +253,6 @@ static inline unsigned short req_get_ioprio(struct request *req) #include -struct blk_queue_ctx; - struct bio_vec; enum blk_eh_timer_return { From 2e9bc3465ac54d282b855b073409c2c3a7d1ae00 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:23 +0200 Subject: [PATCH 013/174] block: move elevator.h to block/ Except for the features passed to blk_queue_required_elevator_features, elevator.h is only needed internally to the block layer. Move the ELEVATOR_F_* definitions to blkdev.h, and then move elevator.h to block/, dropping all the spurious includes outside of that.
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-13-hch@lst.de Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 2 +- block/bfq-iosched.c | 2 +- block/blk-mq-sched.h | 1 + block/blk-mq-tag.h | 2 ++ block/blk.h | 2 ++ block/elevator.c | 2 +- {include/linux => block}/elevator.h | 20 +++----------------- block/kyber-iosched.c | 2 +- block/mq-deadline.c | 2 +- drivers/block/amiflop.c | 1 - drivers/scsi/lpfc/lpfc.h | 1 + include/linux/blkdev.h | 11 +++++++++-- init/main.c | 1 - 13 files changed, 23 insertions(+), 26 deletions(-) rename {include/linux => block}/elevator.h (93%) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 85b8e1c3a762..882ec4bc51ad 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -6,13 +6,13 @@ #include #include #include -#include #include #include #include #include #include +#include "elevator.h" #include "bfq-iosched.h" #ifdef CONFIG_BFQ_CGROUP_DEBUG diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 480e1a134859..b5ef23f63d51 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -117,7 +117,6 @@ #include #include #include -#include #include #include #include @@ -127,6 +126,7 @@ #include +#include "elevator.h" #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 5246ae040704..5181487db792 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -2,6 +2,7 @@ #ifndef BLK_MQ_SCHED_H #define BLK_MQ_SCHED_H +#include "elevator.h" #include "blk-mq.h" #include "blk-mq-tag.h" diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 8ed55af08427..f0a0ee758a55 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -2,6 +2,8 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H +struct blk_mq_alloc_data; + /* * Tag address space map. */ diff --git a/block/blk.h b/block/blk.h index 6c3c00a8fe19..f24569980905 100644 --- a/block/blk.h +++ b/block/blk.h @@ -12,6 +12,8 @@ #include "blk-mq.h" #include "blk-mq-sched.h" +struct elevator_type; + /* Max future timer expiry for timeouts */ #define BLK_MAX_TIMEOUT (5 * HZ) diff --git a/block/elevator.c b/block/elevator.c index ff45d8388f48..57be09cd7f6d 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -40,6 +39,7 @@ #include +#include "elevator.h" #include "blk.h" #include "blk-mq-sched.h" #include "blk-pm.h" diff --git a/include/linux/elevator.h b/block/elevator.h similarity index 93% rename from include/linux/elevator.h rename to block/elevator.h index 80633f3b600c..16cd8bdedb7e 100644 --- a/include/linux/elevator.h +++ b/block/elevator.h @@ -1,17 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_ELEVATOR_H -#define _LINUX_ELEVATOR_H +#ifndef _ELEVATOR_H +#define _ELEVATOR_H #include #include -#ifdef CONFIG_BLOCK - struct io_cq; struct elevator_type; -#ifdef CONFIG_BLK_DEBUG_FS struct blk_mq_debugfs_attr; -#endif /* * Return values from elevator merger @@ -167,14 +163,4 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) -/* - * Elevator features. 
*/ - -/* Supports zoned block devices sequential write constraint */ -#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) -/* Supports scheduling on multiple hardware queues */ -#define ELEVATOR_F_MQ_AWARE (1U << 1) - -#endif /* CONFIG_BLOCK */ -#endif +#endif /* _ELEVATOR_H */ diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index a0ffbabfac2c..53dafc4bcd72 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -9,12 +9,12 @@ #include #include #include -#include <linux/elevator.h> #include #include #include +#include "elevator.h" #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 7f3c3932b723..47f042fa6a68 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -9,7 +9,6 @@ #include #include #include -#include <linux/elevator.h> #include #include #include @@ -20,6 +19,7 @@ #include +#include "elevator.h" #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 8b1714021498..b892e5185d6f 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -64,7 +64,6 @@ #include #include #include -#include <linux/elevator.h> #include #include diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index befeb7c34290..337e6ed24821 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -22,6 +22,7 @@ *******************************************************************/ #include +#include #include #include diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4bcbb1ae2d66..d815238f61ed 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -251,8 +251,6 @@ static inline unsigned short req_get_ioprio(struct request *req) return req->ioprio; } -#include <linux/elevator.h> - struct bio_vec; enum blk_eh_timer_return { @@ -1124,6 +1122,15 @@ extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); + +/* + * Elevator features for blk_queue_required_elevator_features: + */ +/* Supports zoned block devices sequential write constraint */ +#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) +/* Supports scheduling on multiple hardware queues */ +#define ELEVATOR_F_MQ_AWARE (1U << 1) + extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, diff --git a/init/main.c b/init/main.c index 3c4054a95545..4162d7f3179f 100644 --- a/init/main.c +++ b/init/main.c @@ -83,7 +83,6 @@ #include #include #include -#include <linux/elevator.h> #include #include #include From 3ab0bc78e96bd03a5096e4801550926d81b3e19d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:24 +0200 Subject: [PATCH 014/174] block: drop unused includes in <linux/blkdev.h> Drop various includes not actually used in blkdev.h itself.
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-14-hch@lst.de Signed-off-by: Jens Axboe --- drivers/md/dm-ps-historical-service-time.c | 1 + drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 1 + include/linux/blkdev.h | 7 ------- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c index 1856a1b125cc..875bca30a0dd 100644 --- a/drivers/md/dm-ps-historical-service-time.c +++ b/drivers/md/dm-ps-historical-service-time.c @@ -27,6 +27,7 @@ #include #include #include +#include #define DM_MSG_PREFIX "multipath historical-service-time" diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 3ab669dc806f..27884f3106ab 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -3,6 +3,7 @@ * Copyright (c) 2017 Hisilicon Limited. */ +#include #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v3_hw" diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d815238f61ed..46a703394f7f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -3,8 +3,6 @@ #define _LINUX_BLKDEV_H #include -#include -#include #include #include #include @@ -12,17 +10,12 @@ #include #include #include -#include -#include #include -#include #include -#include #include #include #include #include -#include #include struct module; From b81e0c2372e65e5627864ba034433b64b2fc73f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:25 +0200 Subject: [PATCH 015/174] block: drop unused includes in <linux/genhd.h> Drop various includes not actually used in genhd.h itself, and move the remaining includes closer together. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-15-hch@lst.de Signed-off-by: Jens Axboe --- arch/um/drivers/ubd_kern.c | 1 + block/genhd.c | 1 + block/holder.c | 1 + block/partitions/core.c | 1 + drivers/block/amiflop.c | 1 + drivers/block/ataflop.c | 1 + drivers/block/floppy.c | 1 + drivers/block/swim.c | 1 + drivers/block/xen-blkfront.c | 1 + drivers/md/md.c | 1 + drivers/s390/block/dasd_genhd.c | 1 + drivers/scsi/sd.c | 1 + drivers/scsi/sg.c | 1 + drivers/scsi/sr.c | 1 + drivers/scsi/st.c | 1 + include/linux/genhd.h | 14 ++------------ include/linux/part_stat.h | 1 + 17 files changed, 18 insertions(+), 12 deletions(-) diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index cd9dc0556e91..fefd343412c7 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/block/genhd.c b/block/genhd.c index b49858550fa6..ffbdb9b24555 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/block/holder.c b/block/holder.c index 9dc084182337..27cddce1b446 100644 --- a/block/holder.c +++ b/block/holder.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include +#include struct bd_holder_disk { struct list_head list; diff --git a/block/partitions/core.c b/block/partitions/core.c index 58c4c362c94f..3a4898433c43 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -5,6 +5,7 @@ * Copyright (C) 2020 Christoph Hellwig */ #include +#include #include #include #include diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index b892e5185d6f..2909fd9e72fb 100644 ---
a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -61,6 +61,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index a093644ac39f..58e921ab5729 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -68,6 +68,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index fef79ea52e3e..6288ce888414 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -184,6 +184,7 @@ static int print_unex = 1; #include #include #include +#include #include #include #include diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 7ccc8d2a41bc..3911d0833e1b 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 72902104f111..df0deb927760 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/md.c b/drivers/md/md.c index 6c0c3d0d905a..1b9bc8dbd95d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index fa966e0db6ca..3a6f3af240fa 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -14,6 +14,7 @@ #define KMSG_COMPONENT "dasd" #include +#include #include #include diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 523bf2fdc253..d8f6add416c0 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8f05248920e8..3c98f08dc25d 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -31,6 +31,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ #include #include #include +#include #include #include #include diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 8b17b35283aa..115f7ef7a5de 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index ae8636d3780b..9933722acfd9 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -32,6 +32,7 @@ static const char *verstr = "20160209"; #include #include #include +#include #include #include #include diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 0f5315c2b5a3..0b48a0cf4262 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -12,12 +12,10 @@ #include #include -#include -#include -#include #include #include -#include +#include +#include extern const struct device_type disk_type; extern struct device_type part_type; @@ -26,14 +24,6 @@ extern struct class block_class; #define DISK_MAX_PARTS 256 #define DISK_NAME_LEN 32 -#include -#include -#include -#include -#include -#include -#include - #define PARTITION_META_INFO_VOLNAMELTH 64 /* * Enough for the string representation of any kind of UUID plus NULL. 
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index d2558121d48c..6f7949b2fd8d 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -3,6 +3,7 @@ #define _LINUX_PART_STAT_H #include +#include struct disk_stats { u64 nsecs[NR_STAT_GROUPS]; From badf7f64378796d460c79eb0f182fa7282eb65d5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:26 +0200 Subject: [PATCH 016/174] block: move a few merge helpers out of <linux/blkdev.h> These are block-layer internal helpers, so move them to block/blk.h and block/blk-merge.c. Also update a comment a bit to use better grammar. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-16-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-merge.c | 24 ++++++++++++++++ block/blk.h | 38 +++++++++++++++++++++++++ include/linux/blkdev.h | 64 ------------------------------------------ 3 files changed, 62 insertions(+), 64 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index 7a5c81c02c80..39f210da399a 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -558,6 +558,23 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq) return queue_max_segments(rq->q); } +static inline unsigned int blk_rq_get_max_sectors(struct request *rq, + sector_t offset) +{ + struct request_queue *q = rq->q; + + if (blk_rq_is_passthrough(rq)) + return q->limits.max_hw_sectors; + + if (!q->limits.chunk_sectors || + req_op(rq) == REQ_OP_DISCARD || + req_op(rq) == REQ_OP_SECURE_ERASE) + return blk_queue_get_max_sectors(q, req_op(rq)); + + return min(blk_max_size_offset(q, offset, 0), + blk_queue_get_max_sectors(q, req_op(rq))); +} + static inline int ll_new_hw_segment(struct request *req, struct bio *bio, unsigned int nr_phys_segs) { @@ -718,6 +735,13 @@ static enum elv_merge blk_try_req_merge(struct request *req, return ELEVATOR_NO_MERGE; } +static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) +{ + if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b)) + return true; + return false; +} + /* * For non-mq, this has to be called with the request spinlock acquired. * For mq with scheduling, the appropriate queue wide lock should be held. diff --git a/block/blk.h b/block/blk.h index f24569980905..fa90691cfdab 100644 --- a/block/blk.h +++ b/block/blk.h @@ -96,6 +96,44 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, return __bvec_gap_to_prev(q, bprv, offset); } +static inline bool rq_mergeable(struct request *rq) +{ + if (blk_rq_is_passthrough(rq)) + return false; + + if (req_op(rq) == REQ_OP_FLUSH) + return false; + + if (req_op(rq) == REQ_OP_WRITE_ZEROES) + return false; + + if (req_op(rq) == REQ_OP_ZONE_APPEND) + return false; + + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) + return false; + if (rq->rq_flags & RQF_NOMERGE_FLAGS) + return false; + + return true; +} + +/* + * There are two different ways to handle DISCARD merges: + * 1) If max_discard_segments > 1, the driver treats every bio as a range and + * send the bios to controller together. The ranges don't need to be + * contiguous. + * 2) Otherwise, the request will be normal read/write requests. The ranges + * need to be contiguous.
*/ +static inline bool blk_discard_mergable(struct request *req) +{ + if (req_op(req) == REQ_OP_DISCARD && + queue_max_discard_segments(req->q) > 1) + return true; + return false; +} + #ifdef CONFIG_BLK_DEV_INTEGRITY void blk_flush_integrity(void); bool __bio_integrity_endio(struct bio *); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 46a703394f7f..be534040ca9c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -745,37 +745,6 @@ static inline bool rq_is_sync(struct request *rq) return op_is_sync(rq->cmd_flags); } -static inline bool rq_mergeable(struct request *rq) -{ - if (blk_rq_is_passthrough(rq)) - return false; - - if (req_op(rq) == REQ_OP_FLUSH) - return false; - - if (req_op(rq) == REQ_OP_WRITE_ZEROES) - return false; - - if (req_op(rq) == REQ_OP_ZONE_APPEND) - return false; - - if (rq->cmd_flags & REQ_NOMERGE_FLAGS) - return false; - if (rq->rq_flags & RQF_NOMERGE_FLAGS) - return false; - - return true; -} - -static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) -{ - if (bio_page(a) == bio_page(b) && - bio_offset(a) == bio_offset(b)) - return true; - - return false; -} - static inline unsigned int blk_queue_depth(struct request_queue *q) { if (q->queue_depth) @@ -1030,23 +999,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, return min(q->limits.max_sectors, chunk_sectors); } -static inline unsigned int blk_rq_get_max_sectors(struct request *rq, - sector_t offset) -{ - struct request_queue *q = rq->q; - - if (blk_rq_is_passthrough(rq)) - return q->limits.max_hw_sectors; - - if (!q->limits.chunk_sectors || - req_op(rq) == REQ_OP_DISCARD || - req_op(rq) == REQ_OP_SECURE_ERASE) - return blk_queue_get_max_sectors(q, req_op(rq)); - - return min(blk_max_size_offset(q, offset, 0), - blk_queue_get_max_sectors(q, req_op(rq))); -} - static inline unsigned int blk_rq_count_bios(struct request *rq) { unsigned int nr_bios = 0; @@ -1490,22 +1442,6 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector return offset << SECTOR_SHIFT; } -/* - * Two cases of handling DISCARD merge: - * If max_discard_segments > 1, the driver takes every bio - * as a range and send them to controller together. The ranges - * needn't to be contiguous. - * Otherwise, the bios/requests will be handled as same as - * others which should be contiguous. - */ -static inline bool blk_discard_mergable(struct request *req) -{ - if (req_op(req) == REQ_OP_DISCARD && - queue_max_discard_segments(req->q) > 1) - return true; - return false; -} - static inline int bdev_discard_alignment(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); From fe45e630a1035aea94c29016f2598bbde149bbe3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:27 +0200 Subject: [PATCH 017/174] block: move integrity handling out of <linux/blkdev.h> Split the integrity/metadata handling definitions out into a new header.
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-17-hch@lst.de Signed-off-by: Jens Axboe --- block/bdev.c | 1 + block/bio-integrity.c | 2 +- block/blk-core.c | 1 + block/blk-integrity.c | 2 +- block/blk-merge.c | 1 + block/blk-mq.c | 1 + block/keyslot-manager.c | 1 + block/t10-pi.c | 2 +- drivers/md/dm-bio-record.h | 1 + drivers/md/dm-crypt.c | 1 + drivers/md/dm-table.c | 1 + drivers/md/md.c | 1 + drivers/nvdimm/core.c | 1 + drivers/nvme/host/core.c | 1 + drivers/nvme/host/pci.c | 1 + drivers/nvme/host/rdma.c | 1 + drivers/nvme/target/io-cmd-bdev.c | 1 + drivers/nvme/target/rdma.c | 1 + drivers/scsi/scsi_lib.c | 1 + drivers/scsi/sd_dif.c | 2 +- drivers/scsi/virtio_scsi.c | 1 + drivers/target/target_core_iblock.c | 1 + include/linux/blk-integrity.h | 183 ++++++++++++++++++++++++++++ include/linux/blkdev.h | 183 ---------------------------- 24 files changed, 205 insertions(+), 187 deletions(-) create mode 100644 include/linux/blk-integrity.h diff --git a/block/bdev.c b/block/bdev.c index 485a258b0ab3..93b1188d7e58 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 6b47cddbbca1..21234ff966d9 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -6,7 +6,7 @@ * Written by: Martin K. Petersen */ -#include +#include #include #include #include diff --git a/block/blk-core.c b/block/blk-core.c index 4d8f5fe91588..d2c6caadef89 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 16d5d5338392..cef534a7cbc9 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -6,7 +6,7 @@ * Written by: Martin K. 
Petersen */ -#include +#include #include #include #include diff --git a/block/blk-merge.c b/block/blk-merge.c index 39f210da399a..5b4f23014df8 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/block/blk-mq.c b/block/blk-mq.c index 0fefe00cebd4..2539ba976949 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c index 2c4a55bea6ca..1792159d12d1 100644 --- a/block/keyslot-manager.c +++ b/block/keyslot-manager.c @@ -35,6 +35,7 @@ #include #include #include +#include struct blk_ksm_keyslot { atomic_t slot_refs; diff --git a/block/t10-pi.c b/block/t10-pi.c index 00c203b2a921..25a52a2a09a8 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c @@ -5,7 +5,7 @@ */ #include -#include +#include #include #include #include diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index a3b71350eec8..745e3ab4aa0a 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -8,6 +8,7 @@ #define DM_BIO_RECORD_H #include +#include /* * There are lots of mutable fields in the bio struct that get diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 916b7da16de2..292f7896f733 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2111daaacaba..1fa4d5582dca 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/md.c b/drivers/md/md.c index 1b9bc8dbd95d..ec09083ff0ef 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 7de592d7eff4..6a45fa91e8a3 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f8dd664b2eda..3d444b13cd69 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 149ecf73df38..896328271471 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 042c594bc57e..40317e1b9183 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 0fc2781ab970..6139e1de50a6 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -5,6 +5,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include "nvmet.h" diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 891174ccd44b..38d1f292ecc2 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -5,6 +5,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include diff --git a/drivers/scsi/scsi_lib.c 
b/drivers/scsi/scsi_lib.c index 572673873ddf..33fd9a01330c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 4cadb26070a8..349950616adc 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -6,7 +6,7 @@ * Written by: Martin K. Petersen */ -#include +#include #include #include diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 07d0250f17c3..b8455fcbf18b 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 4069a1edcfa3..d39b87e2ed10 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h new file mode 100644 index 000000000000..8a038ea0717e --- /dev/null +++ b/include/linux/blk-integrity.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLK_INTEGRITY_H +#define _LINUX_BLK_INTEGRITY_H + +#include + +struct request; + +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1 << 0, + BLK_INTEGRITY_GENERATE = 1 << 1, + BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, + BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, +}; + +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + unsigned short interval; + const char *disk_name; +}; + +typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); +typedef void (integrity_prepare_fn) (struct request *); +typedef void (integrity_complete_fn) (struct request *, unsigned int); + +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + integrity_prepare_fn *prepare_fn; + integrity_complete_fn *complete_fn; + const char *name; +}; + +#ifdef CONFIG_BLK_DEV_INTEGRITY +void blk_integrity_register(struct gendisk *, struct blk_integrity *); +void blk_integrity_unregister(struct gendisk *); +int blk_integrity_compare(struct gendisk *, struct gendisk *); +int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, + struct scatterlist *); +int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); + +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return NULL; + + return bi; +} + +static inline struct blk_integrity * +bdev_get_integrity(struct block_device *bdev) +{ + return blk_get_integrity(bdev->bd_disk); +} + +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return q->integrity.profile; +} + +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ + q->limits.max_integrity_segments = segs; +} + +static inline unsigned short +queue_max_integrity_segments(const struct request_queue *q) +{ + return q->limits.max_integrity_segments; +} + +/** + * bio_integrity_intervals - Return number of integrity intervals for a bio + * @bi: blk_integrity profile for device + * @sectors: Size of the bio in 512-byte sectors + * + * Description: The block layer calculates everything in 512 byte + * sectors but integrity metadata is done in terms of the data integrity + * interval 
size of the storage device. Convert the block layer sectors + * to the appropriate number of integrity intervals. + */ +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return sectors >> (bi->interval_exp - 9); +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return bio_integrity_intervals(bi, sectors) * bi->tuple_size; +} + +static inline bool blk_integrity_rq(struct request *rq) +{ + return rq->cmd_flags & REQ_INTEGRITY; +} + +/* + * Return the first bvec that contains integrity data. Only drivers that are + * limited to a single integrity segment should use this helper. + */ +static inline struct bio_vec *rq_integrity_vec(struct request *rq) +{ + if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) + return NULL; + return rq->bio->bi_integrity->bip_vec; +} +#else /* CONFIG_BLK_DEV_INTEGRITY */ +static inline int blk_rq_count_integrity_sg(struct request_queue *q, + struct bio *b) +{ + return 0; +} +static inline int blk_rq_map_integrity_sg(struct request_queue *q, + struct bio *b, + struct scatterlist *s) +{ + return 0; +} +static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) +{ + return NULL; +} +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return NULL; +} +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return false; +} +static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) +{ + return 0; +} +static inline void blk_integrity_register(struct gendisk *d, + struct blk_integrity *b) +{ +} +static inline void blk_integrity_unregister(struct gendisk *d) +{ +} +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ +} +static inline unsigned short +queue_max_integrity_segments(const struct request_queue *q) +{ + return 0; +} + +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return 0; +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return 0; +} +static inline int blk_integrity_rq(struct request *rq) +{ + return 0; +} + +static inline struct bio_vec *rq_integrity_vec(struct request *rq) +{ + return NULL; +} +#endif /* CONFIG_BLK_DEV_INTEGRITY */ +#endif /* _LINUX_BLK_INTEGRITY_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index be534040ca9c..56e60e5c09d0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1555,189 +1555,6 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned lo #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ MODULE_ALIAS("block-major-" __stringify(major) "-*") -#if defined(CONFIG_BLK_DEV_INTEGRITY) - -enum blk_integrity_flags { - BLK_INTEGRITY_VERIFY = 1 << 0, - BLK_INTEGRITY_GENERATE = 1 << 1, - BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, - BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, -}; - -struct blk_integrity_iter { - void *prot_buf; - void *data_buf; - sector_t seed; - unsigned int data_size; - unsigned short interval; - const char *disk_name; -}; - -typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); -typedef void (integrity_prepare_fn) (struct request *); -typedef void (integrity_complete_fn) (struct request *, unsigned int); - -struct blk_integrity_profile { - integrity_processing_fn *generate_fn; - integrity_processing_fn *verify_fn; - integrity_prepare_fn *prepare_fn; - 
integrity_complete_fn *complete_fn; - const char *name; -}; - -extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); -extern void blk_integrity_unregister(struct gendisk *); -extern int blk_integrity_compare(struct gendisk *, struct gendisk *); -extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, - struct scatterlist *); -extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); - -static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) -{ - struct blk_integrity *bi = &disk->queue->integrity; - - if (!bi->profile) - return NULL; - - return bi; -} - -static inline -struct blk_integrity *bdev_get_integrity(struct block_device *bdev) -{ - return blk_get_integrity(bdev->bd_disk); -} - -static inline bool -blk_integrity_queue_supports_integrity(struct request_queue *q) -{ - return q->integrity.profile; -} - -static inline bool blk_integrity_rq(struct request *rq) -{ - return rq->cmd_flags & REQ_INTEGRITY; -} - -static inline void blk_queue_max_integrity_segments(struct request_queue *q, - unsigned int segs) -{ - q->limits.max_integrity_segments = segs; -} - -static inline unsigned short -queue_max_integrity_segments(const struct request_queue *q) -{ - return q->limits.max_integrity_segments; -} - -/** - * bio_integrity_intervals - Return number of integrity intervals for a bio - * @bi: blk_integrity profile for device - * @sectors: Size of the bio in 512-byte sectors - * - * Description: The block layer calculates everything in 512 byte - * sectors but integrity metadata is done in terms of the data integrity - * interval size of the storage device. Convert the block layer sectors - * to the appropriate number of integrity intervals. - */ -static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, - unsigned int sectors) -{ - return sectors >> (bi->interval_exp - 9); -} - -static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, - unsigned int sectors) -{ - return bio_integrity_intervals(bi, sectors) * bi->tuple_size; -} - -/* - * Return the first bvec that contains integrity data. Only drivers that are - * limited to a single integrity segment should use this helper. 
- */ -static inline struct bio_vec *rq_integrity_vec(struct request *rq) -{ - if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) - return NULL; - return rq->bio->bi_integrity->bip_vec; -} - -#else /* CONFIG_BLK_DEV_INTEGRITY */ - -struct bio; -struct block_device; -struct gendisk; -struct blk_integrity; - -static inline int blk_integrity_rq(struct request *rq) -{ - return 0; -} -static inline int blk_rq_count_integrity_sg(struct request_queue *q, - struct bio *b) -{ - return 0; -} -static inline int blk_rq_map_integrity_sg(struct request_queue *q, - struct bio *b, - struct scatterlist *s) -{ - return 0; -} -static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) -{ - return NULL; -} -static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) -{ - return NULL; -} -static inline bool -blk_integrity_queue_supports_integrity(struct request_queue *q) -{ - return false; -} -static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) -{ - return 0; -} -static inline void blk_integrity_register(struct gendisk *d, - struct blk_integrity *b) -{ -} -static inline void blk_integrity_unregister(struct gendisk *d) -{ -} -static inline void blk_queue_max_integrity_segments(struct request_queue *q, - unsigned int segs) -{ -} -static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) -{ - return 0; -} - -static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, - unsigned int sectors) -{ - return 0; -} - -static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, - unsigned int sectors) -{ - return 0; -} - -static inline struct bio_vec *rq_integrity_vec(struct request *rq) -{ - return NULL; -} - -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - #ifdef CONFIG_BLK_INLINE_ENCRYPTION bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q); From 24b83deb29b7f06a5573b65f2ce96f5482755d43 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:28 +0200 Subject: [PATCH 018/174] block: move struct request to blk-mq.h struct request is only used by blk-mq drivers, so move it and all related declarations to blk-mq.h. 
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-18-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-crypto-fallback.c | 1 + block/blk-crypto-internal.h | 2 +- drivers/block/rnbd/rnbd-proto.h | 2 +- drivers/md/dm-verity-target.c | 1 + drivers/mmc/core/sd.c | 1 + drivers/target/target_core_file.c | 1 + drivers/target/target_core_iblock.c | 1 + include/linux/blk-mq.h | 465 +++++++++++++++++++++++++++ include/linux/blk_types.h | 2 - include/linux/blkdev.h | 469 +--------------------------- include/linux/blktrace_api.h | 2 +- include/linux/t10-pi.h | 2 +- include/scsi/scsi_device.h | 2 +- 13 files changed, 476 insertions(+), 475 deletions(-) diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index c322176a1e09..ec4c7823541c 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "blk-crypto-internal.h" diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h index 0d36aae538d7..2fb0d65a464c 100644 --- a/block/blk-crypto-internal.h +++ b/block/blk-crypto-internal.h @@ -7,7 +7,7 @@ #define __LINUX_BLK_CRYPTO_INTERNAL_H #include -#include +#include /* Represents a crypto mode supported by blk-crypto */ struct blk_crypto_mode { diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h index c1bc5c0fef71..de5d5a8df81d 100644 --- a/drivers/block/rnbd/rnbd-proto.h +++ b/drivers/block/rnbd/rnbd-proto.h @@ -10,7 +10,7 @@ #define RNBD_PROTO_H #include -#include +#include #include #include #include diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 88288c8d6bc8..aae48a8b1a04 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -18,6 +18,7 @@ #include "dm-verity-verify-sig.h" #include #include +#include #define DM_MSG_PREFIX "verity" diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 4646b7a03db6..c9db24e16af1 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index ef4a8e189fba..02f64453b4c5 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index d39b87e2ed10..31df20abe141 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 13ba1861e688..bd4086a6f28e 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -6,10 +6,218 @@ #include #include #include +#include struct blk_mq_tags; struct blk_flush_queue; +#define BLKDEV_MIN_RQ 4 +#define BLKDEV_MAX_RQ 128 /* Default maximum */ + +typedef void (rq_end_io_fn)(struct request *, blk_status_t); + +/* + * request flags */ +typedef __u32 __bitwise req_flags_t; + +/* drive already may have started this one */ +#define RQF_STARTED ((__force req_flags_t)(1 << 1)) +/* may not be passed by ioscheduler */ +#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) +/* request for flush sequence */ +#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) +/* merge of different types, fail separately */ +#define RQF_MIXED_MERGE ((__force req_flags_t)(1 
<< 5)) +/* track inflight for MQ */ +#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) +/* don't call prep for this one */ +#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) +/* vaguely specified driver internal error. Ignored by the block layer */ +#define RQF_FAILED ((__force req_flags_t)(1 << 10)) +/* don't warn about errors */ +#define RQF_QUIET ((__force req_flags_t)(1 << 11)) +/* elevator private data attached */ +#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) +/* account into disk and partition IO statistics */ +#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) +/* runtime pm request */ +#define RQF_PM ((__force req_flags_t)(1 << 15)) +/* on IO scheduler merge hash */ +#define RQF_HASHED ((__force req_flags_t)(1 << 16)) +/* track IO completion time */ +#define RQF_STATS ((__force req_flags_t)(1 << 17)) +/* Look at ->special_vec for the actual data payload instead of the + bio chain. */ +#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) +/* The per-zone write lock is held for this request */ +#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) +/* already slept for hybrid poll */ +#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) +/* ->timeout has been called, don't expire again */ +#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) + +/* flags that prevent us from merging requests: */ +#define RQF_NOMERGE_FLAGS \ + (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +/* + * Try to put the fields that are referenced together in the same cacheline. + * + * If you modify this structure, make sure to update blk_rq_init() and + * especially blk_mq_rq_ctx_init() to take care of the added fields. + */ +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + + unsigned int cmd_flags; /* op and common flags */ + req_flags_t rq_flags; + + int tag; + int internal_tag; + + /* the following two fields are internal, NEVER access directly */ + unsigned int __data_len; /* total data len */ + sector_t __sector; /* sector cursor */ + + struct bio *bio; + struct bio *biotail; + + struct list_head queuelist; + + /* + * The hash is used inside the scheduler, and killed once the + * request reaches the dispatch list. The ipi_list is only used + * to queue the request for softirq completion, which is long + * after the request has been unhashed (and even removed from + * the dispatch list). + */ + union { + struct hlist_node hash; /* merge hash */ + struct llist_node ipi_list; + }; + + /* + * The rb_node is only used inside the io scheduler, requests + * are pruned when moved to the dispatch queue. So let the + * completion_data share space with the rb_node. + */ + union { + struct rb_node rb_node; /* sort/lookup */ + struct bio_vec special_vec; + void *completion_data; + int error_count; /* for legacy drivers, don't use */ + }; + + /* + * Three pointers are available for the IO schedulers, if they need + * more they have to dynamically allocate it. Flush requests are + * never put on the IO scheduler. So let the flush fields share + * space with the elevator data. + */ + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + + struct gendisk *rq_disk; + struct block_device *part; +#ifdef CONFIG_BLK_RQ_ALLOC_TIME + /* Time that the first bio started allocating this request. 
*/ + u64 alloc_time_ns; +#endif + /* Time that this request was allocated for this IO. */ + u64 start_time_ns; + /* Time that I/O was submitted to the device. */ + u64 io_start_time_ns; + +#ifdef CONFIG_BLK_WBT + unsigned short wbt_flags; +#endif + /* + * rq sectors used for blk stats. It has the same value + * with blk_rq_sectors(rq), except that it never be zeroed + * by completion. + */ + unsigned short stats_sectors; + + /* + * Number of scatter-gather DMA addr+len pairs after + * physical address coalescing is performed. + */ + unsigned short nr_phys_segments; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + unsigned short nr_integrity_segments; +#endif + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct bio_crypt_ctx *crypt_ctx; + struct blk_ksm_keyslot *crypt_keyslot; +#endif + + unsigned short write_hint; + unsigned short ioprio; + + enum mq_rq_state state; + refcount_t ref; + + unsigned int timeout; + unsigned long deadline; + + union { + struct __call_single_data csd; + u64 fifo_time; + }; + + /* + * completion callback. + */ + rq_end_io_fn *end_io; + void *end_io_data; +}; + +#define req_op(req) \ + ((req)->cmd_flags & REQ_OP_MASK) + +static inline bool blk_rq_is_passthrough(struct request *rq) +{ + return blk_op_is_passthrough(req_op(rq)); +} + +static inline unsigned short req_get_ioprio(struct request *req) +{ + return req->ioprio; +} + +#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) + +#define rq_dma_dir(rq) \ + (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) + +enum blk_eh_timer_return { + BLK_EH_DONE, /* drivers has completed the command */ + BLK_EH_RESET_TIMER, /* reset timer and try again */ +}; + +#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ +#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ + /** * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware * block device @@ -637,4 +845,261 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio); void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, struct lock_class_key *key); +static inline bool rq_is_sync(struct request *rq) +{ + return op_is_sync(rq->cmd_flags); +} + +void blk_rq_init(struct request_queue *q, struct request *rq); +void blk_put_request(struct request *rq); +struct request *blk_get_request(struct request_queue *q, unsigned int op, + blk_mq_req_flags_t flags); +int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), void *data); +void blk_rq_unprep_clone(struct request *rq); +blk_status_t blk_insert_cloned_request(struct request_queue *q, + struct request *rq); + +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + unsigned long offset; + int null_mapped; + int from_user; +}; + +int blk_rq_map_user(struct request_queue *, struct request *, + struct rq_map_data *, void __user *, unsigned long, gfp_t); +int blk_rq_map_user_iov(struct request_queue *, struct request *, + struct rq_map_data *, const struct iov_iter *, gfp_t); +int blk_rq_unmap_user(struct bio *); +int blk_rq_map_kern(struct request_queue *, struct request *, void *, + unsigned int, gfp_t); +int blk_rq_append_bio(struct request *rq, struct bio *bio); +void blk_execute_rq_nowait(struct gendisk *, struct request *, int, + rq_end_io_fn *); +blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, + int at_head); + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +#define __rq_for_each_bio(_bio, rq) \ + if 
((rq->bio)) \ + for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) + +#define rq_for_each_segment(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_segment(bvl, _iter.bio, _iter.iter) + +#define rq_for_each_bvec(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_bvec(bvl, _iter.bio, _iter.iter) + +#define rq_iter_last(bvec, _iter) \ + (_iter.bio->bi_next == NULL && \ + bio_iter_last(bvec, _iter.iter)) + +/* + * blk_rq_pos() : the current sector + * blk_rq_bytes() : bytes left in the entire request + * blk_rq_cur_bytes() : bytes left in the current segment + * blk_rq_err_bytes() : bytes left till the next error boundary + * blk_rq_sectors() : sectors left in the entire request + * blk_rq_cur_sectors() : sectors left in the current segment + * blk_rq_stats_sectors() : sectors of the entire request used for stats + */ +static inline sector_t blk_rq_pos(const struct request *rq) +{ + return rq->__sector; +} + +static inline unsigned int blk_rq_bytes(const struct request *rq) +{ + return rq->__data_len; +} + +static inline int blk_rq_cur_bytes(const struct request *rq) +{ + return rq->bio ? bio_cur_bytes(rq->bio) : 0; +} + +unsigned int blk_rq_err_bytes(const struct request *rq); + +static inline unsigned int blk_rq_sectors(const struct request *rq) +{ + return blk_rq_bytes(rq) >> SECTOR_SHIFT; +} + +static inline unsigned int blk_rq_cur_sectors(const struct request *rq) +{ + return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; +} + +static inline unsigned int blk_rq_stats_sectors(const struct request *rq) +{ + return rq->stats_sectors; +} + +/* + * Some commands like WRITE SAME have a payload or data transfer size which + * is different from the size of the request. Any driver that supports such + * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to + * calculate the data transfer size. + */ +static inline unsigned int blk_rq_payload_bytes(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return rq->special_vec.bv_len; + return blk_rq_bytes(rq); +} + +/* + * Return the first full biovec in the request. The caller needs to check that + * there are any bvecs before calling this helper. + */ +static inline struct bio_vec req_bvec(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return rq->special_vec; + return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); +} + +static inline unsigned int blk_rq_count_bios(struct request *rq) +{ + unsigned int nr_bios = 0; + struct bio *bio; + + __rq_for_each_bio(bio, rq) + nr_bios++; + + return nr_bios; +} + +void blk_steal_bios(struct bio_list *list, struct request *rq); + +/* + * Request completion related functions. + * + * blk_update_request() completes given number of bytes and updates + * the request without completing it. + */ +bool blk_update_request(struct request *rq, blk_status_t error, + unsigned int nr_bytes); +void blk_abort_request(struct request *); + +/* + * Number of physical segments as sent to the device. + * + * Normally this is the number of discontiguous data segments sent by the + * submitter. But for data-less command like discard we might have no + * actual data segments submitted, but the driver might have to add it's + * own special payload. In that case we still return 1 here so that this + * special payload will be mapped. 
+ */ +static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return 1; + return rq->nr_phys_segments; +} + +/* + * Number of discard segments (or ranges) the driver needs to fill in. + * Each discard bio merged into a request is counted as one segment. + */ +static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) +{ + return max_t(unsigned short, rq->nr_phys_segments, 1); +} + +int __blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist, struct scatterlist **last_sg); +static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct scatterlist *last_sg = NULL; + + return __blk_rq_map_sg(q, rq, sglist, &last_sg); +} +void blk_dump_rq_flags(struct request *, char *); + +#ifdef CONFIG_BLK_DEV_ZONED +static inline unsigned int blk_rq_zone_no(struct request *rq) +{ + return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); +} + +static inline unsigned int blk_rq_zone_is_seq(struct request *rq) +{ + return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); +} + +bool blk_req_needs_zone_write_lock(struct request *rq); +bool blk_req_zone_write_trylock(struct request *rq); +void __blk_req_zone_write_lock(struct request *rq); +void __blk_req_zone_write_unlock(struct request *rq); + +static inline void blk_req_zone_write_lock(struct request *rq) +{ + if (blk_req_needs_zone_write_lock(rq)) + __blk_req_zone_write_lock(rq); +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ + if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) + __blk_req_zone_write_unlock(rq); +} + +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return rq->q->seq_zones_wlock && + test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + if (!blk_req_needs_zone_write_lock(rq)) + return true; + return !blk_req_zone_is_write_locked(rq); +} +#else /* CONFIG_BLK_DEV_ZONED */ +static inline bool blk_req_needs_zone_write_lock(struct request *rq) +{ + return false; +} + +static inline void blk_req_zone_write_lock(struct request *rq) +{ +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ +} +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return false; +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + return true; +} +#endif /* CONFIG_BLK_DEV_ZONED */ + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" #endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +void rq_flush_dcache_pages(struct request *rq); +#else +static inline void rq_flush_dcache_pages(struct request *rq) +{ +} +#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */ +#endif /* BLK_MQ_H */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index be622b5a21ed..3b967053e9f5 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -431,8 +431,6 @@ enum stat_group { #define bio_op(bio) \ ((bio)->bi_opf & REQ_OP_MASK) -#define req_op(req) \ - ((req)->cmd_flags & REQ_OP_MASK) /* obsolete, don't use in new code */ static inline void bio_set_op_attrs(struct bio *bio, unsigned op, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 56e60e5c09d0..0e960d74615e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -32,9 +31,6 @@ struct blk_queue_stats; 
struct blk_stat_callback; struct blk_keyslot_manager; -#define BLKDEV_MIN_RQ 4 -#define BLKDEV_MAX_RQ 128 /* Default maximum */ - /* Must be consistent with blk_mq_poll_stats_bkt() */ #define BLK_MQ_POLL_STATS_BKTS 16 @@ -47,213 +43,12 @@ struct blk_keyslot_manager; */ #define BLKCG_MAX_POLS 6 -typedef void (rq_end_io_fn)(struct request *, blk_status_t); - -/* - * request flags */ -typedef __u32 __bitwise req_flags_t; - -/* drive already may have started this one */ -#define RQF_STARTED ((__force req_flags_t)(1 << 1)) -/* may not be passed by ioscheduler */ -#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) -/* request for flush sequence */ -#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) -/* merge of different types, fail separately */ -#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) -/* track inflight for MQ */ -#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) -/* don't call prep for this one */ -#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) -/* vaguely specified driver internal error. Ignored by the block layer */ -#define RQF_FAILED ((__force req_flags_t)(1 << 10)) -/* don't warn about errors */ -#define RQF_QUIET ((__force req_flags_t)(1 << 11)) -/* elevator private data attached */ -#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) -/* account into disk and partition IO statistics */ -#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) -/* runtime pm request */ -#define RQF_PM ((__force req_flags_t)(1 << 15)) -/* on IO scheduler merge hash */ -#define RQF_HASHED ((__force req_flags_t)(1 << 16)) -/* track IO completion time */ -#define RQF_STATS ((__force req_flags_t)(1 << 17)) -/* Look at ->special_vec for the actual data payload instead of the - bio chain. */ -#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) -/* The per-zone write lock is held for this request */ -#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) -/* already slept for hybrid poll */ -#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) -/* ->timeout has been called, don't expire again */ -#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) - -/* flags that prevent us from merging requests: */ -#define RQF_NOMERGE_FLAGS \ - (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) - -/* - * Request state for blk-mq. - */ -enum mq_rq_state { - MQ_RQ_IDLE = 0, - MQ_RQ_IN_FLIGHT = 1, - MQ_RQ_COMPLETE = 2, -}; - -/* - * Try to put the fields that are referenced together in the same cacheline. - * - * If you modify this structure, make sure to update blk_rq_init() and - * especially blk_mq_rq_ctx_init() to take care of the added fields. - */ -struct request { - struct request_queue *q; - struct blk_mq_ctx *mq_ctx; - struct blk_mq_hw_ctx *mq_hctx; - - unsigned int cmd_flags; /* op and common flags */ - req_flags_t rq_flags; - - int tag; - int internal_tag; - - /* the following two fields are internal, NEVER access directly */ - unsigned int __data_len; /* total data len */ - sector_t __sector; /* sector cursor */ - - struct bio *bio; - struct bio *biotail; - - struct list_head queuelist; - - /* - * The hash is used inside the scheduler, and killed once the - * request reaches the dispatch list. The ipi_list is only used - * to queue the request for softirq completion, which is long - * after the request has been unhashed (and even removed from - * the dispatch list). 
- */ - union { - struct hlist_node hash; /* merge hash */ - struct llist_node ipi_list; - }; - - /* - * The rb_node is only used inside the io scheduler, requests - * are pruned when moved to the dispatch queue. So let the - * completion_data share space with the rb_node. - */ - union { - struct rb_node rb_node; /* sort/lookup */ - struct bio_vec special_vec; - void *completion_data; - int error_count; /* for legacy drivers, don't use */ - }; - - /* - * Three pointers are available for the IO schedulers, if they need - * more they have to dynamically allocate it. Flush requests are - * never put on the IO scheduler. So let the flush fields share - * space with the elevator data. - */ - union { - struct { - struct io_cq *icq; - void *priv[2]; - } elv; - - struct { - unsigned int seq; - struct list_head list; - rq_end_io_fn *saved_end_io; - } flush; - }; - - struct gendisk *rq_disk; - struct block_device *part; -#ifdef CONFIG_BLK_RQ_ALLOC_TIME - /* Time that the first bio started allocating this request. */ - u64 alloc_time_ns; -#endif - /* Time that this request was allocated for this IO. */ - u64 start_time_ns; - /* Time that I/O was submitted to the device. */ - u64 io_start_time_ns; - -#ifdef CONFIG_BLK_WBT - unsigned short wbt_flags; -#endif - /* - * rq sectors used for blk stats. It has the same value - * with blk_rq_sectors(rq), except that it never be zeroed - * by completion. - */ - unsigned short stats_sectors; - - /* - * Number of scatter-gather DMA addr+len pairs after - * physical address coalescing is performed. - */ - unsigned short nr_phys_segments; - -#if defined(CONFIG_BLK_DEV_INTEGRITY) - unsigned short nr_integrity_segments; -#endif - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - struct bio_crypt_ctx *crypt_ctx; - struct blk_ksm_keyslot *crypt_keyslot; -#endif - - unsigned short write_hint; - unsigned short ioprio; - - enum mq_rq_state state; - refcount_t ref; - - unsigned int timeout; - unsigned long deadline; - - union { - struct __call_single_data csd; - u64 fifo_time; - }; - - /* - * completion callback. - */ - rq_end_io_fn *end_io; - void *end_io_data; -}; - static inline bool blk_op_is_passthrough(unsigned int op) { op &= REQ_OP_MASK; return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; } -static inline bool blk_rq_is_passthrough(struct request *rq) -{ - return blk_op_is_passthrough(req_op(rq)); -} - -static inline unsigned short req_get_ioprio(struct request *req) -{ - return req->ioprio; -} - -struct bio_vec; - -enum blk_eh_timer_return { - BLK_EH_DONE, /* drivers has completed the command */ - BLK_EH_RESET_TIMER, /* reset timer and try again */ -}; - -#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ -#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ - /* * Zoned block device models (zoned limit). * @@ -620,11 +415,6 @@ extern void blk_clear_pm_only(struct request_queue *q); #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) -#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) - -#define rq_dma_dir(rq) \ - (op_is_write(req_op(rq)) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE) - #define dma_map_bvec(dev, bv, dir, attrs) \ dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ (dir), (attrs)) @@ -740,11 +530,6 @@ static inline unsigned int queue_max_active_zones(const struct request_queue *q) } #endif /* CONFIG_BLK_DEV_ZONED */ -static inline bool rq_is_sync(struct request *rq) -{ - return op_is_sync(rq->cmd_flags); -} - static inline unsigned int blk_queue_depth(struct request_queue *q) { if (q->queue_depth) @@ -759,83 +544,20 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) #define BLK_MIN_SG_TIMEOUT (7 * HZ) -struct rq_map_data { - struct page **pages; - int page_order; - int nr_entries; - unsigned long offset; - int null_mapped; - int from_user; -}; - -struct req_iterator { - struct bvec_iter iter; - struct bio *bio; -}; - /* This should not be used directly - use rq_for_each_segment */ #define for_each_bio(_bio) \ for (; _bio; _bio = _bio->bi_next) -#define __rq_for_each_bio(_bio, rq) \ - if ((rq->bio)) \ - for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) -#define rq_for_each_segment(bvl, _rq, _iter) \ - __rq_for_each_bio(_iter.bio, _rq) \ - bio_for_each_segment(bvl, _iter.bio, _iter.iter) - -#define rq_for_each_bvec(bvl, _rq, _iter) \ - __rq_for_each_bio(_iter.bio, _rq) \ - bio_for_each_bvec(bvl, _iter.bio, _iter.iter) - -#define rq_iter_last(bvec, _iter) \ - (_iter.bio->bi_next == NULL && \ - bio_iter_last(bvec, _iter.iter)) - -#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" -#endif -#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -extern void rq_flush_dcache_pages(struct request *rq); -#else -static inline void rq_flush_dcache_pages(struct request *rq) -{ -} -#endif extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); blk_qc_t submit_bio_noacct(struct bio *bio); -extern void blk_rq_init(struct request_queue *q, struct request *rq); -extern void blk_put_request(struct request *); -extern struct request *blk_get_request(struct request_queue *, unsigned int op, - blk_mq_req_flags_t flags); + extern int blk_lld_busy(struct request_queue *q); -extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, - struct bio_set *bs, gfp_t gfp_mask, - int (*bio_ctr)(struct bio *, struct bio *, void *), - void *data); -extern void blk_rq_unprep_clone(struct request *rq); -extern blk_status_t blk_insert_cloned_request(struct request_queue *q, - struct request *rq); -int blk_rq_append_bio(struct request *rq, struct bio *bio); extern void blk_queue_split(struct bio **); extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); extern void blk_queue_exit(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); -extern int blk_rq_map_user(struct request_queue *, struct request *, - struct rq_map_data *, void __user *, unsigned long, - gfp_t); -extern int blk_rq_unmap_user(struct bio *); -extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); -extern int blk_rq_map_user_iov(struct request_queue *, struct request *, - struct rq_map_data *, const struct iov_iter *, - gfp_t); -extern void blk_execute_rq_nowait(struct gendisk *, - struct request *, int, rq_end_io_fn *); - -blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, - int at_head); /* Helper to convert REQ_OP_XXX to its string format XXX */ extern const char 
*blk_op_str(unsigned int op); @@ -867,47 +589,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) #define SECTOR_MASK (PAGE_SECTORS - 1) -/* - * blk_rq_pos() : the current sector - * blk_rq_bytes() : bytes left in the entire request - * blk_rq_cur_bytes() : bytes left in the current segment - * blk_rq_err_bytes() : bytes left till the next error boundary - * blk_rq_sectors() : sectors left in the entire request - * blk_rq_cur_sectors() : sectors left in the current segment - * blk_rq_stats_sectors() : sectors of the entire request used for stats - */ -static inline sector_t blk_rq_pos(const struct request *rq) -{ - return rq->__sector; -} - -static inline unsigned int blk_rq_bytes(const struct request *rq) -{ - return rq->__data_len; -} - -static inline int blk_rq_cur_bytes(const struct request *rq) -{ - return rq->bio ? bio_cur_bytes(rq->bio) : 0; -} - -extern unsigned int blk_rq_err_bytes(const struct request *rq); - -static inline unsigned int blk_rq_sectors(const struct request *rq) -{ - return blk_rq_bytes(rq) >> SECTOR_SHIFT; -} - -static inline unsigned int blk_rq_cur_sectors(const struct request *rq) -{ - return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; -} - -static inline unsigned int blk_rq_stats_sectors(const struct request *rq) -{ - return rq->stats_sectors; -} - #ifdef CONFIG_BLK_DEV_ZONED /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */ @@ -924,42 +605,8 @@ static inline unsigned int bio_zone_is_seq(struct bio *bio) return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev), bio->bi_iter.bi_sector); } - -static inline unsigned int blk_rq_zone_no(struct request *rq) -{ - return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); -} - -static inline unsigned int blk_rq_zone_is_seq(struct request *rq) -{ - return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); -} #endif /* CONFIG_BLK_DEV_ZONED */ -/* - * Some commands like WRITE SAME have a payload or data transfer size which - * is different from the size of the request. Any driver that supports such - * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to - * calculate the data transfer size. - */ -static inline unsigned int blk_rq_payload_bytes(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return rq->special_vec.bv_len; - return blk_rq_bytes(rq); -} - -/* - * Return the first full biovec in the request. The caller needs to check that - * there are any bvecs before calling this helper. - */ -static inline struct bio_vec req_bvec(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return rq->special_vec; - return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); -} - static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, int op) { @@ -999,30 +646,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, return min(q->limits.max_sectors, chunk_sectors); } -static inline unsigned int blk_rq_count_bios(struct request *rq) -{ - unsigned int nr_bios = 0; - struct bio *bio; - - __rq_for_each_bio(bio, rq) - nr_bios++; - - return nr_bios; -} - -void blk_steal_bios(struct bio_list *list, struct request *rq); - -/* - * Request completion related functions. - * - * blk_update_request() completes given number of bytes and updates - * the request without completing it. 
- */ -extern bool blk_update_request(struct request *rq, blk_status_t error, - unsigned int nr_bytes); - -extern void blk_abort_request(struct request *); - /* * Access functions for manipulating queue properties */ @@ -1081,42 +704,6 @@ extern void blk_queue_required_elevator_features(struct request_queue *q, extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev); -/* - * Number of physical segments as sent to the device. - * - * Normally this is the number of discontiguous data segments sent by the - * submitter. But for data-less command like discard we might have no - * actual data segments submitted, but the driver might have to add it's - * own special payload. In that case we still return 1 here so that this - * special payload will be mapped. - */ -static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return 1; - return rq->nr_phys_segments; -} - -/* - * Number of discard segments (or ranges) the driver needs to fill in. - * Each discard bio merged into a request is counted as one segment. - */ -static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) -{ - return max_t(unsigned short, rq->nr_phys_segments, 1); -} - -int __blk_rq_map_sg(struct request_queue *q, struct request *rq, - struct scatterlist *sglist, struct scatterlist **last_sg); -static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, - struct scatterlist *sglist) -{ - struct scatterlist *last_sg = NULL; - - return __blk_rq_map_sg(q, rq, sglist, &last_sg); -} -extern void blk_dump_rq_flags(struct request *, char *); - bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); @@ -1613,60 +1200,6 @@ extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); -#ifdef CONFIG_BLK_DEV_ZONED -bool blk_req_needs_zone_write_lock(struct request *rq); -bool blk_req_zone_write_trylock(struct request *rq); -void __blk_req_zone_write_lock(struct request *rq); -void __blk_req_zone_write_unlock(struct request *rq); - -static inline void blk_req_zone_write_lock(struct request *rq) -{ - if (blk_req_needs_zone_write_lock(rq)) - __blk_req_zone_write_lock(rq); -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ - if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) - __blk_req_zone_write_unlock(rq); -} - -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return rq->q->seq_zones_wlock && - test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - if (!blk_req_needs_zone_write_lock(rq)) - return true; - return !blk_req_zone_is_write_locked(rq); -} -#else -static inline bool blk_req_needs_zone_write_lock(struct request *rq) -{ - return false; -} - -static inline void blk_req_zone_write_lock(struct request *rq) -{ -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ -} -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return false; -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - return true; -} -#endif /* CONFIG_BLK_DEV_ZONED */ - static inline void blk_wake_io_task(struct task_struct *waiter) { /* diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index a083e15df608..22501a293fa5 
100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -2,7 +2,7 @@ #ifndef BLKTRACE_H #define BLKTRACE_H -#include +#include #include #include #include diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 96305a64a5a7..c635c2e014e3 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -3,7 +3,7 @@ #define _LINUX_T10_PI_H #include -#include +#include /* * A T10 PI-capable target device can be formatted with different diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index b97e142a7ca9..430b73bd02ac 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include #include From e2c7275dc0fe3cba6a3de5a5f4c98f923e3d9d14 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 27 Sep 2021 15:03:25 -0700 Subject: [PATCH 019/174] block/mq-deadline: Improve request accounting further The scheduler .insert_requests() callback is called when a request is queued for the first time and also when it is requeued. Only count a request the first time it is queued. Additionally, since the mq-deadline scheduler only performs zone locking for requests that have been inserted, skip the zone unlock code for requests that have not been inserted into the mq-deadline scheduler. Fixes: 38ba64d12d4c ("block/mq-deadline: Track I/O statistics") Reviewed-by: Damien Le Moal Reviewed-by: Niklas Cassel Cc: Hannes Reinecke Signed-off-by: Bart Van Assche Link: https://lore.kernel.org/r/20210927220328.1410161-2-bvanassche@acm.org Signed-off-by: Jens Axboe --- block/mq-deadline.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 47f042fa6a68..c27b4347ca91 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -677,8 +677,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_req_zone_write_unlock(rq); prio = ioprio_class_to_prio[ioprio_class]; - dd_count(dd, inserted, prio); - rq->elv.priv[0] = (void *)(uintptr_t)1; + if (!rq->elv.priv[0]) { + dd_count(dd, inserted, prio); + rq->elv.priv[0] = (void *)(uintptr_t)1; + } if (blk_mq_sched_try_insert_merge(q, rq, &free)) { blk_mq_free_requests(&free); @@ -759,12 +761,13 @@ static void dd_finish_request(struct request *rq) /* * The block layer core may call dd_finish_request() without having - * called dd_insert_requests(). Hence only update statistics for - * requests for which dd_insert_requests() has been called. See also - * blk_mq_request_bypass_insert(). + * called dd_insert_requests(). Skip requests that bypassed I/O + * scheduling. See also blk_mq_request_bypass_insert(). */ - if (rq->elv.priv[0]) - dd_count(dd, completed, prio); + if (!rq->elv.priv[0]) + return; + + dd_count(dd, completed, prio); if (blk_queue_is_zoned(q)) { unsigned long flags; From 32f64cad97187f4aed50aca3ed1b5a51a00f848b Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 27 Sep 2021 15:03:26 -0700 Subject: [PATCH 020/174] block/mq-deadline: Add an invariant check Check a statistics invariant at module unload time. When running blktests, the invariant is verified every time a request queue is removed and hence is verified at least once per test. 
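The invariant is that, for each priority level, the number of inserted
requests equals the number of completed requests once the queue is torn
down, i.e. dd_queued() must return zero. Because the difference is
computed in unsigned arithmetic, the check stays correct even if the
counters wrap. A standalone sketch of that property (illustrative
userspace C, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t inserted = UINT32_MAX;		/* about to wrap */
	uint32_t completed = UINT32_MAX - 3;	/* three requests in flight */

	inserted += 2;		/* wraps around to 1 */
	completed += 2;

	/* modular subtraction still yields the in-flight count */
	assert(inserted - completed == 3);
	return 0;
}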
Reviewed-by: Damien Le Moal Reviewed-by: Niklas Cassel Cc: Hannes Reinecke Signed-off-by: Bart Van Assche Link: https://lore.kernel.org/r/20210927220328.1410161-3-bvanassche@acm.org Signed-off-by: Jens Axboe --- block/mq-deadline.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index c27b4347ca91..2586b3f8c7e9 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -270,6 +270,12 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, deadline_remove_request(rq->q, per_prio, rq); } +/* Number of requests queued for a given priority level. */ +static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) +{ + return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); +} + /* * deadline_check_fifo returns 0 if there are no expired requests on the fifo, * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) @@ -539,6 +545,12 @@ static void dd_exit_sched(struct elevator_queue *e) WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ])); WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE])); + WARN_ONCE(dd_queued(dd, prio) != 0, + "statistics for priority %d: i %u m %u d %u c %u\n", + prio, dd_sum(dd, inserted, prio), + dd_sum(dd, merged, prio), + dd_sum(dd, dispatched, prio), + dd_sum(dd, completed, prio)); } free_percpu(dd->stats); @@ -950,12 +962,6 @@ static int dd_async_depth_show(void *data, struct seq_file *m) return 0; } -/* Number of requests queued for a given priority level. */ -static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) -{ - return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); -} - static int dd_queued_show(void *data, struct seq_file *m) { struct request_queue *q = data; From bce0363ed84af8e8e27df464b2082f4b5f58b57e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 27 Sep 2021 15:03:27 -0700 Subject: [PATCH 021/174] block/mq-deadline: Stop using per-CPU counters Calculating the sum over all CPUs of per-CPU counters frequently is inefficient. Hence switch from per-CPU to individual counters. Three counters are protected by the mq-deadline spinlock since these are only accessed from contexts that already hold that spinlock. The fourth counter is atomic because protecting it with the mq-deadline spinlock would trigger lock contention. Reviewed-by: Damien Le Moal Reviewed-by: Niklas Cassel Cc: Hannes Reinecke Signed-off-by: Bart Van Assche Link: https://lore.kernel.org/r/20210927220328.1410161-4-bvanassche@acm.org Signed-off-by: Jens Axboe --- block/mq-deadline.c | 126 ++++++++++++++++++++------------------------ 1 file changed, 57 insertions(+), 69 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 2586b3f8c7e9..b262f40f32c0 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -51,17 +51,16 @@ enum dd_prio { enum { DD_PRIO_COUNT = 3 }; -/* I/O statistics per I/O priority. */ +/* + * I/O statistics per I/O priority. It is fine if these counters overflow. + * What matters is that these counters are at least as wide as + * log2(max_outstanding_requests). + */ struct io_stats_per_prio { - local_t inserted; - local_t merged; - local_t dispatched; - local_t completed; -}; - -/* I/O statistics for all I/O priorities (enum dd_prio). 
*/ -struct io_stats { - struct io_stats_per_prio stats[DD_PRIO_COUNT]; + uint32_t inserted; + uint32_t merged; + uint32_t dispatched; + atomic_t completed; }; /* @@ -74,6 +73,7 @@ struct dd_per_prio { struct list_head fifo_list[DD_DIR_COUNT]; /* Next request in FIFO order. Read, write or both are NULL. */ struct request *next_rq[DD_DIR_COUNT]; + struct io_stats_per_prio stats; }; struct deadline_data { @@ -88,8 +88,6 @@ struct deadline_data { unsigned int batching; /* number of sequential requests made */ unsigned int starved; /* times reads have starved writes */ - struct io_stats __percpu *stats; - /* * settings that change how the i/o scheduler behaves */ @@ -103,33 +101,6 @@ struct deadline_data { spinlock_t zone_lock; }; -/* Count one event of type 'event_type' and with I/O priority 'prio' */ -#define dd_count(dd, event_type, prio) do { \ - struct io_stats *io_stats = get_cpu_ptr((dd)->stats); \ - \ - BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \ - BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \ - local_inc(&io_stats->stats[(prio)].event_type); \ - put_cpu_ptr(io_stats); \ -} while (0) - -/* - * Returns the total number of dd_count(dd, event_type, prio) calls across all - * CPUs. No locking or barriers since it is fine if the returned sum is slightly - * outdated. - */ -#define dd_sum(dd, event_type, prio) ({ \ - unsigned int cpu; \ - u32 sum = 0; \ - \ - BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \ - BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \ - for_each_present_cpu(cpu) \ - sum += local_read(&per_cpu_ptr((dd)->stats, cpu)-> \ - stats[(prio)].event_type); \ - sum; \ -}) - /* Maps an I/O priority class to a deadline scheduler priority. */ static const enum dd_prio ioprio_class_to_prio[] = { [IOPRIO_CLASS_NONE] = DD_BE_PRIO, @@ -233,7 +204,9 @@ static void dd_merged_requests(struct request_queue *q, struct request *req, const u8 ioprio_class = dd_rq_ioclass(next); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; - dd_count(dd, merged, prio); + lockdep_assert_held(&dd->lock); + + dd->per_prio[prio].stats.merged++; /* * if next expires before rq, assign its expire time to rq @@ -273,7 +246,11 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, /* Number of requests queued for a given priority level. */ static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) { - return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); + const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats; + + lockdep_assert_held(&dd->lock); + + return stats->inserted - atomic_read(&stats->completed); } /* @@ -463,7 +440,7 @@ dispatch_request: done: ioprio_class = dd_rq_ioclass(rq); prio = ioprio_class_to_prio[ioprio_class]; - dd_count(dd, dispatched, prio); + dd->per_prio[prio].stats.dispatched++; /* * If the request needs its target zone locked, do it. 
*/ @@ -542,18 +519,21 @@ static void dd_exit_sched(struct elevator_queue *e) for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; + const struct io_stats_per_prio *stats = &per_prio->stats; + uint32_t queued; WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ])); WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE])); - WARN_ONCE(dd_queued(dd, prio) != 0, - "statistics for priority %d: i %u m %u d %u c %u\n", - prio, dd_sum(dd, inserted, prio), - dd_sum(dd, merged, prio), - dd_sum(dd, dispatched, prio), - dd_sum(dd, completed, prio)); - } - free_percpu(dd->stats); + spin_lock(&dd->lock); + queued = dd_queued(dd, prio); + spin_unlock(&dd->lock); + + WARN_ONCE(queued != 0, + "statistics for priority %d: i %u m %u d %u c %u\n", + prio, stats->inserted, stats->merged, + stats->dispatched, atomic_read(&stats->completed)); + } kfree(dd); } @@ -578,11 +558,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) eq->elevator_data = dd; - dd->stats = alloc_percpu_gfp(typeof(*dd->stats), - GFP_KERNEL | __GFP_ZERO); - if (!dd->stats) - goto free_dd; - for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; @@ -604,9 +579,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) q->elevator = eq; return 0; -free_dd: - kfree(dd); - put_eq: kobject_put(&eq->kobj); return ret; @@ -689,8 +661,9 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_req_zone_write_unlock(rq); prio = ioprio_class_to_prio[ioprio_class]; + per_prio = &dd->per_prio[prio]; if (!rq->elv.priv[0]) { - dd_count(dd, inserted, prio); + per_prio->stats.inserted++; rq->elv.priv[0] = (void *)(uintptr_t)1; } @@ -701,7 +674,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, trace_block_rq_insert(rq); - per_prio = &dd->per_prio[prio]; if (at_head) { list_add(&rq->queuelist, &per_prio->dispatch); } else { @@ -779,7 +751,7 @@ static void dd_finish_request(struct request *rq) if (!rq->elv.priv[0]) return; - dd_count(dd, completed, prio); + atomic_inc(&per_prio->stats.completed); if (blk_queue_is_zoned(q)) { unsigned long flags; @@ -966,28 +938,44 @@ static int dd_queued_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; + u32 rt, be, idle; + + spin_lock(&dd->lock); + rt = dd_queued(dd, DD_RT_PRIO); + be = dd_queued(dd, DD_BE_PRIO); + idle = dd_queued(dd, DD_IDLE_PRIO); + spin_unlock(&dd->lock); + + seq_printf(m, "%u %u %u\n", rt, be, idle); - seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO), - dd_queued(dd, DD_BE_PRIO), - dd_queued(dd, DD_IDLE_PRIO)); return 0; } /* Number of requests owned by the block driver for a given priority. 
*/ static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio) { - return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio) - - dd_sum(dd, completed, prio); + const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats; + + lockdep_assert_held(&dd->lock); + + return stats->dispatched + stats->merged - + atomic_read(&stats->completed); } static int dd_owned_by_driver_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; + u32 rt, be, idle; + + spin_lock(&dd->lock); + rt = dd_owned_by_driver(dd, DD_RT_PRIO); + be = dd_owned_by_driver(dd, DD_BE_PRIO); + idle = dd_owned_by_driver(dd, DD_IDLE_PRIO); + spin_unlock(&dd->lock); + + seq_printf(m, "%u %u %u\n", rt, be, idle); - seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO), - dd_owned_by_driver(dd, DD_BE_PRIO), - dd_owned_by_driver(dd, DD_IDLE_PRIO)); return 0; } From 322cff70d46c6c230a722684fd65ae6b5f57436e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 27 Sep 2021 15:03:28 -0700 Subject: [PATCH 022/174] block/mq-deadline: Prioritize high-priority requests In addition to reverting commit 7b05bf771084 ("Revert "block/mq-deadline: Prioritize high-priority requests""), this patch uses 'jiffies' instead of ktime_get() in the code for aging lower priority requests. This patch has been tested as follows: Measured QD=1/jobs=1 IOPS for nullb with the mq-deadline scheduler. Result without and with this patch: 555 K IOPS. Measured QD=1/jobs=8 IOPS for nullb with the mq-deadline scheduler. Result without and with this patch: about 380 K IOPS. Ran the following script: set -e scriptdir=$(dirname "$0") if [ -e /sys/module/scsi_debug ]; then modprobe -r scsi_debug; fi modprobe scsi_debug ndelay=1000000 max_queue=16 sd='' while [ -z "$sd" ]; do sd=$(basename /sys/bus/pseudo/drivers/scsi_debug/adapter*/host*/target*/*/block/*) done echo $((100*1000)) > "/sys/block/$sd/queue/iosched/prio_aging_expire" if [ -e /sys/fs/cgroup/io.prio.class ]; then cd /sys/fs/cgroup echo restrict-to-be >io.prio.class echo +io > cgroup.subtree_control else cd /sys/fs/cgroup/blkio/ echo restrict-to-be >blkio.prio.class fi echo $$ >cgroup.procs mkdir -p hipri cd hipri if [ -e io.prio.class ]; then echo none-to-rt >io.prio.class else echo none-to-rt >blkio.prio.class fi { "${scriptdir}/max-iops" -a1 -d32 -j1 -e mq-deadline "/dev/$sd" >& ~/low-pri.txt & } echo $$ >cgroup.procs "${scriptdir}/max-iops" -a1 -d32 -j1 -e mq-deadline "/dev/$sd" >& ~/hi-pri.txt Result: * 11000 IOPS for the high-priority job * 40 IOPS for the low-priority job If the prio aging expiry time is changed from 100s into 0, the IOPS results change into 6712 and 6796 IOPS. 
The max-iops script is a script that runs fio with the following arguments: --bs=4K --gtod_reduce=1 --ioengine=libaio --ioscheduler=${arg_e} --runtime=60 --norandommap --rw=read --thread --buffered=0 --numjobs=${arg_j} --iodepth=${arg_d} --iodepth_batch_submit=${arg_a} --iodepth_batch_complete=$((arg_d / 2)) --name=${positional_argument_1} --filename=${positional_argument_1} Cc: Damien Le Moal Cc: Niklas Cassel Cc: Hannes Reinecke Signed-off-by: Bart Van Assche Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20210927220328.1410161-5-bvanassche@acm.org [axboe: @latest -> @latest_start] Signed-off-by: Jens Axboe --- block/mq-deadline.c | 77 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 73 insertions(+), 4 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index b262f40f32c0..2001ac186f5c 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -31,6 +31,11 @@ */ static const int read_expire = HZ / 2; /* max time before a read is submitted. */ static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ +/* + * Time after which to dispatch lower priority requests even if higher + * priority requests are pending. + */ +static const int prio_aging_expire = 10 * HZ; static const int writes_starved = 2; /* max times reads can starve a write */ static const int fifo_batch = 16; /* # of sequential requests treated as one by the above parameters. For throughput. */ @@ -96,6 +101,7 @@ struct deadline_data { int writes_starved; int front_merges; u32 async_depth; + int prio_aging_expire; spinlock_t lock; spinlock_t zone_lock; @@ -338,12 +344,27 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, return rq; } +/* + * Returns true if and only if @rq started after @latest_start where + * @latest_start is in jiffies. + */ +static bool started_after(struct deadline_data *dd, struct request *rq, + unsigned long latest_start) +{ + unsigned long start_time = (unsigned long)rq->fifo_time; + + start_time -= dd->fifo_expire[rq_data_dir(rq)]; + + return time_after(start_time, latest_start); +} + /* * deadline_dispatch_requests selects the best request according to - * read/write expire, fifo_batch, etc + * read/write expire, fifo_batch, etc and with a start time <= @latest_start. */ static struct request *__dd_dispatch_request(struct deadline_data *dd, - struct dd_per_prio *per_prio) + struct dd_per_prio *per_prio, + unsigned long latest_start) { struct request *rq, *next_rq; enum dd_data_dir data_dir; @@ -355,6 +376,8 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, if (!list_empty(&per_prio->dispatch)) { rq = list_first_entry(&per_prio->dispatch, struct request, queuelist); + if (started_after(dd, rq, latest_start)) + return NULL; list_del_init(&rq->queuelist); goto done; } @@ -432,6 +455,9 @@ dispatch_find_request: dd->batching = 0; dispatch_request: + if (started_after(dd, rq, latest_start)) + return NULL; + /* * rq is the selected appropriate request. */ @@ -449,6 +475,34 @@ done: return rq; } +/* + * Check whether there are any requests with priority other than DD_RT_PRIO + * that were inserted more than prio_aging_expire jiffies ago. 
+ */ +static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd, + unsigned long now) +{ + struct request *rq; + enum dd_prio prio; + int prio_cnt; + + lockdep_assert_held(&dd->lock); + + prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) + + !!dd_queued(dd, DD_IDLE_PRIO); + if (prio_cnt < 2) + return NULL; + + for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) { + rq = __dd_dispatch_request(dd, &dd->per_prio[prio], + now - dd->prio_aging_expire); + if (rq) + return rq; + } + + return NULL; +} + /* * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests(). * @@ -460,15 +514,26 @@ done: static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; + const unsigned long now = jiffies; struct request *rq; enum dd_prio prio; spin_lock(&dd->lock); + rq = dd_dispatch_prio_aged_requests(dd, now); + if (rq) + goto unlock; + + /* + * Next, dispatch requests in priority order. Ignore lower priority + * requests if any higher priority requests are pending. + */ for (prio = 0; prio <= DD_PRIO_MAX; prio++) { - rq = __dd_dispatch_request(dd, &dd->per_prio[prio]); - if (rq) + rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now); + if (rq || dd_queued(dd, prio)) break; } + +unlock: spin_unlock(&dd->lock); return rq; @@ -573,6 +638,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) dd->front_merges = 1; dd->last_dir = DD_WRITE; dd->fifo_batch = fifo_batch; + dd->prio_aging_expire = prio_aging_expire; spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); @@ -796,6 +862,7 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR)) SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]); SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]); +SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire); SHOW_INT(deadline_writes_starved_show, dd->writes_starved); SHOW_INT(deadline_front_merges_show, dd->front_merges); SHOW_INT(deadline_async_depth_show, dd->front_merges); @@ -825,6 +892,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies) STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX); STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX); +STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX); STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX); STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX); @@ -843,6 +911,7 @@ static struct elv_fs_entry deadline_attrs[] = { DD_ATTR(front_merges), DD_ATTR(async_depth), DD_ATTR(fifo_batch), + DD_ATTR(prio_aging_expire), __ATTR_NULL }; From 8a3ee6778ef1a9a3bfd3b1134ea29e95d3568271 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Sep 2021 07:27:55 +0200 Subject: [PATCH 023/174] block: print the current process in handle_bad_sector Make the bad sector information a little more useful by printing current->comm to identify the caller. 
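With the change, the ratelimited message identifies the offending task,
e.g. (illustrative task name, device, and sector values, following the
format string in the hunk below):

fio: attempt to access beyond end of device
sda: rw=1, want=262144, limit=204800

where "fio" is current->comm, "sda" is the device name, want is the
requested end sector of the bio, and limit is the device capacity in
512-byte sectors.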
Signed-off-by: Christoph Hellwig
Reviewed-by: Damien Le Moal
Reviewed-by: Chaitanya Kulkarni
Link: https://lore.kernel.org/r/20210928052755.113016-1-hch@lst.de
Signed-off-by: Jens Axboe
---
 block/blk-core.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index d2c6caadef89..469550845244 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -655,8 +655,9 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
 {
 	char b[BDEVNAME_SIZE];
 
-	pr_info_ratelimited("attempt to access beyond end of device\n"
+	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
 			    "%s: rw=%d, want=%llu, limit=%llu\n",
+			    current->comm,
 			    bio_devname(bio, b), bio->bi_opf,
 			    bio_end_sector(bio), maxsector);
 }

From 65de57bb2e66f1fbede166c1307570ebd09eae83 Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:26 +0800
Subject: [PATCH 024/174] blk-mq: Change rqs check in blk_mq_free_rqs()

The original code in commit 24d2f90309b23 ("blk-mq: split out tag
initialization, support shared tags") would check that tags->rqs is
non-NULL and then dereference tags->rqs[].

Then in commit 2af8cbe30531 ("blk-mq: split tag ->rqs[] into two"), we
started to dereference tags->static_rqs[], but continued to check
non-NULL tags->rqs.

Check that tags->static_rqs is non-NULL instead, which is more logical.

Signed-off-by: John Garry
Reviewed-by: Ming Lei
Reviewed-by: Hannes Reinecke
Link: https://lore.kernel.org/r/1633429419-228500-2-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2539ba976949..03cbde75756d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2349,7 +2349,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 {
 	struct page *page;
 
-	if (tags->rqs && set->ops->exit_request) {
+	if (tags->static_rqs && set->ops->exit_request) {
 		int i;
 
 		for (i = 0; i < tags->nr_tags; i++) {

From d2a27964e60ff18e637334864042af54de383902 Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:27 +0800
Subject: [PATCH 025/174] block: Rename BLKDEV_MAX_RQ -> BLKDEV_DEFAULT_RQ

It is a bit confusing that there are both BLKDEV_MAX_RQ and MAX_SCHED_RQ,
as the name BLKDEV_MAX_RQ implies that it is always the maximum number of
requests, which it is not.

Rename BLKDEV_MAX_RQ to BLKDEV_DEFAULT_RQ, matching its usage as the
default number of requests assigned when allocating a request queue.

Signed-off-by: John Garry
Reviewed-by: Ming Lei
Reviewed-by: Hannes Reinecke
Link: https://lore.kernel.org/r/1633429419-228500-3-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe
---
 block/blk-core.c       | 2 +-
 block/blk-mq-sched.c   | 2 +-
 block/blk-mq-sched.h   | 2 +-
 drivers/block/rbd.c    | 2 +-
 include/linux/blk-mq.h | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 469550845244..2590e7724990 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -587,7 +587,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 
 	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
-	q->nr_requests = BLKDEV_MAX_RQ;
+	q->nr_requests = BLKDEV_DEFAULT_RQ;
 
 	return q;
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 0f006cabfd91..2231fb0d4c35 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -606,7 +606,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	 * Additionally, this is a per-hw queue depth.
 	 */
 	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
-				   BLKDEV_MAX_RQ);
+				   BLKDEV_DEFAULT_RQ);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_sched_alloc_tags(q, hctx, i);
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 5181487db792..f6a1ce9e31f3 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -6,7 +6,7 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
-#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ)
+#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)
 
 void blk_mq_sched_assign_ioc(struct request *rq);
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e65c9d706f6f..bf60aebd0cfb 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -836,7 +836,7 @@ struct rbd_options {
 	u32	alloc_hint_flags;  /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
 };
 
-#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ
 #define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
 #define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
 #define RBD_READ_ONLY_DEFAULT	false
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index bd4086a6f28e..31cc41dfef6b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -12,7 +12,7 @@ struct blk_mq_tags;
 struct blk_flush_queue;
 
 #define BLKDEV_MIN_RQ	4
-#define BLKDEV_MAX_RQ	128  /* Default maximum */
+#define BLKDEV_DEFAULT_RQ	128
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);

From 8fa044640f128c0cd015f72cda42989a664889fc Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:28 +0800
Subject: [PATCH 026/174] blk-mq: Relocate shared sbitmap resize in
 blk_mq_update_nr_requests()

For shared sbitmap, if the call to blk_mq_tag_update_depth() was
successful for any hctx when hctx->sched_tags is not set, then it would
be successful for all (due to the nature in which
blk_mq_tag_update_depth() fails).

As such, there is no need to call blk_mq_tag_resize_shared_sbitmap() for
each hctx. So relocate the call until after the hctx iteration under the
!q->elevator check, which is equivalent (to !hctx->sched_tags).
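As a condensed sketch of the resulting flow (illustrative only; the
sched_tags pointer fixups, queue quiescing, and error handling in the
real hunk below are elided):

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->sched_tags)
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
						      nr, false);
		else
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		if (ret)
			break;
	}
	if (!ret) {
		q->nr_requests = nr;
		if (blk_mq_is_sbitmap_shared(set->flags)) {
			/* resize once, after the loop */
			if (q->elevator)
				sbitmap_queue_resize(&q->sched_bitmap_tags,
						     nr - set->reserved_tags);
			else
				blk_mq_tag_resize_shared_sbitmap(set, nr);
		}
	}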
Signed-off-by: John Garry
Reviewed-by: Ming Lei
Reviewed-by: Hannes Reinecke
Link: https://lore.kernel.org/r/1633429419-228500-4-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 03cbde75756d..240c04aaa3f8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3625,8 +3625,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		if (!hctx->sched_tags) {
 			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
 							false);
-			if (!ret && blk_mq_is_sbitmap_shared(set->flags))
-				blk_mq_tag_resize_shared_sbitmap(set, nr);
 		} else {
 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
 							nr, true);
@@ -3644,9 +3642,13 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	}
 	if (!ret) {
 		q->nr_requests = nr;
-		if (q->elevator && blk_mq_is_sbitmap_shared(set->flags))
-			sbitmap_queue_resize(&q->sched_bitmap_tags,
-					     nr - set->reserved_tags);
+		if (blk_mq_is_sbitmap_shared(set->flags)) {
+			if (q->elevator)
+				sbitmap_queue_resize(&q->sched_bitmap_tags,
+						     nr - set->reserved_tags);
+			else
+				blk_mq_tag_resize_shared_sbitmap(set, nr);
+		}
 	}
 
 	blk_mq_unquiesce_queue(q);

From f6adcef5f317c78a899ca9766e90e47026834158 Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:29 +0800
Subject: [PATCH 027/174] blk-mq: Invert check in blk_mq_update_nr_requests()

It's easier to read:

if (x)
	X;
else
	Y;

over:

if (!x)
	Y;
else
	X;

No functional change intended.

Signed-off-by: John Garry
Reviewed-by: Ming Lei
Reviewed-by: Hannes Reinecke
Link: https://lore.kernel.org/r/1633429419-228500-5-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 240c04aaa3f8..ca0e65117064 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3622,18 +3622,18 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		 * If we're using an MQ scheduler, just update the scheduler
 		 * queue depth. This is similar to what the old code would do.
 		 */
-		if (!hctx->sched_tags) {
-			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
-							false);
-		} else {
+		if (hctx->sched_tags) {
 			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
-							nr, true);
+						      nr, true);
 			if (blk_mq_is_sbitmap_shared(set->flags)) {
 				hctx->sched_tags->bitmap_tags =
 					&q->sched_bitmap_tags;
 				hctx->sched_tags->breserved_tags =
 					&q->sched_breserved_tags;
 			}
+		} else {
+			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
+						      false);
 		}
 		if (ret)
 			break;

From d99a6bb337677d812d5bef7795c9fcf17f1ccebe Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:30 +0800
Subject: [PATCH 028/174] blk-mq-sched: Rename blk_mq_sched_alloc_{tags ->
 map_and_rqs}()

Function blk_mq_sched_alloc_tags() does the same as
__blk_mq_alloc_map_and_request(), so give it a similar name to be
consistent.

Similarly rename label err_free_tags -> err_free_map_and_rqs.
Signed-off-by: John Garry Reviewed-by: Ming Lei Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-6-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 2231fb0d4c35..644b6d554d72 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -515,9 +515,9 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, percpu_ref_put(&q->q_usage_counter); } -static int blk_mq_sched_alloc_tags(struct request_queue *q, - struct blk_mq_hw_ctx *hctx, - unsigned int hctx_idx) +static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, + struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx) { struct blk_mq_tag_set *set = q->tag_set; int ret; @@ -609,15 +609,15 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) BLKDEV_DEFAULT_RQ); queue_for_each_hw_ctx(q, hctx, i) { - ret = blk_mq_sched_alloc_tags(q, hctx, i); + ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i); if (ret) - goto err_free_tags; + goto err_free_map_and_rqs; } if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { ret = blk_mq_init_sched_shared_sbitmap(q); if (ret) - goto err_free_tags; + goto err_free_map_and_rqs; } ret = e->ops.init_sched(q, e); @@ -645,7 +645,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) err_free_sbitmap: if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) blk_mq_exit_sched_shared_sbitmap(q); -err_free_tags: +err_free_map_and_rqs: blk_mq_sched_free_requests(q); blk_mq_sched_tags_teardown(q); q->elevator = NULL; From 1820f4f0a5e75633ff384167027bf45ed1b10234 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:31 +0800 Subject: [PATCH 029/174] blk-mq-sched: Rename blk_mq_sched_free_{requests -> rqs}() To be more concise and consistent in naming, rename blk_mq_sched_free_requests() -> blk_mq_sched_free_rqs(). 
Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/1633429419-228500-7-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-mq-sched.c | 6 +++--- block/blk-mq-sched.h | 2 +- block/blk.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 2590e7724990..0ed17547bfed 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -403,7 +403,7 @@ void blk_cleanup_queue(struct request_queue *q) */ mutex_lock(&q->sysfs_lock); if (q->elevator) - blk_mq_sched_free_requests(q); + blk_mq_sched_free_rqs(q); mutex_unlock(&q->sysfs_lock); percpu_ref_exit(&q->q_usage_counter); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 644b6d554d72..bdbb6c31b433 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -631,7 +631,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) ret = e->ops.init_hctx(hctx, i); if (ret) { eq = q->elevator; - blk_mq_sched_free_requests(q); + blk_mq_sched_free_rqs(q); blk_mq_exit_sched(q, eq); kobject_put(&eq->kobj); return ret; @@ -646,7 +646,7 @@ err_free_sbitmap: if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) blk_mq_exit_sched_shared_sbitmap(q); err_free_map_and_rqs: - blk_mq_sched_free_requests(q); + blk_mq_sched_free_rqs(q); blk_mq_sched_tags_teardown(q); q->elevator = NULL; return ret; @@ -656,7 +656,7 @@ err_free_map_and_rqs: * called in either blk_queue_cleanup or elevator_switch, tagset * is required for freeing requests */ -void blk_mq_sched_free_requests(struct request_queue *q) +void blk_mq_sched_free_rqs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i; diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index f6a1ce9e31f3..e4d367e0b2a3 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -29,7 +29,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx); int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); -void blk_mq_sched_free_requests(struct request_queue *q); +void blk_mq_sched_free_rqs(struct request_queue *q); static inline bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, diff --git a/block/blk.h b/block/blk.h index fa90691cfdab..8cdee4844d8a 100644 --- a/block/blk.h +++ b/block/blk.h @@ -242,7 +242,7 @@ static inline void elevator_exit(struct request_queue *q, { lockdep_assert_held(&q->sysfs_lock); - blk_mq_sched_free_requests(q); + blk_mq_sched_free_rqs(q); __elevator_exit(q, e); } From f32e4eafaf29aa493bdea3123dac09ad135b599b Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:32 +0800 Subject: [PATCH 030/174] blk-mq: Pass driver tags to blk_mq_clear_rq_mapping() Function blk_mq_clear_rq_mapping() will be used for shared sbitmap tags in future, so pass a driver tags pointer instead of the tagset container and HW queue index. 
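The interface change is easiest to see at the lone call site in
blk_mq_free_rqs(), condensed here for illustration:

	/* before: the helper derived the driver tags itself */
	blk_mq_clear_rq_mapping(set, tags, hctx_idx);

	/* after: the caller resolves them first */
	drv_tags = set->tags[hctx_idx];
	blk_mq_clear_rq_mapping(drv_tags, tags);

With the caller choosing drv_tags, a later patch in this series can pass
a shared tags pointer there instead.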
Signed-off-by: John Garry
Reviewed-by: Hannes Reinecke
Reviewed-by: Ming Lei
Link: https://lore.kernel.org/r/1633429419-228500-8-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ca0e65117064..0619298d82b4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2311,10 +2311,9 @@ static size_t order_to_size(unsigned int order)
 }
 
 /* called before freeing request pool in @tags */
-static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
-		struct blk_mq_tags *tags, unsigned int hctx_idx)
+static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
+		struct blk_mq_tags *tags)
 {
-	struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
 	struct page *page;
 	unsigned long flags;
 
@@ -2323,7 +2322,7 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
 		unsigned long end = start + order_to_size(page->private);
 		int i;
 
-		for (i = 0; i < set->queue_depth; i++) {
+		for (i = 0; i < drv_tags->nr_tags; i++) {
 			struct request *rq = drv_tags->rqs[i];
 			unsigned long rq_addr = (unsigned long)rq;
 
@@ -2347,8 +2346,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx)
 {
+	struct blk_mq_tags *drv_tags;
 	struct page *page;
 
+	drv_tags = set->tags[hctx_idx];
+
 	if (tags->static_rqs && set->ops->exit_request) {
 		int i;
 
@@ -2362,7 +2364,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		}
 	}
 
-	blk_mq_clear_rq_mapping(set, tags, hctx_idx);
+	blk_mq_clear_rq_mapping(drv_tags, tags);
 
 	while (!list_empty(&tags->page_list)) {
 		page = list_first_entry(&tags->page_list, struct page, lru);

From 4f245d5bf0f7432c881e22a77066160a6cba8e03 Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:33 +0800
Subject: [PATCH 031/174] blk-mq: Don't clear driver tags own mapping

Function blk_mq_clear_rq_mapping() is required to clear the sched tags
mappings in driver tags rqs[].

But there is no need for driver tags to clear their own mapping, so skip
clearing the mapping in this scenario.

Signed-off-by: John Garry
Reviewed-by: Hannes Reinecke
Reviewed-by: Ming Lei
Link: https://lore.kernel.org/r/1633429419-228500-9-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0619298d82b4..38cffe362a29 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2317,6 +2317,10 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
 	struct page *page;
 	unsigned long flags;
 
+	/* There is no need to clear a driver tags own mapping */
+	if (drv_tags == tags)
+		return;
+
 	list_for_each_entry(page, &tags->page_list, lru) {
 		unsigned long start = (unsigned long)page_address(page);
 		unsigned long end = start + order_to_size(page->private);

From a7e7388dced47a10ca13ae95ca975ea2830f196b Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:34 +0800
Subject: [PATCH 032/174] blk-mq: Add blk_mq_tag_update_sched_shared_sbitmap()

Put the functionality to update the sched shared sbitmap size in a
common function.

Since the same formula is always used to resize, and its inputs can be
derived from the request queue, just pass the request queue pointer.
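Condensed from the diff below, the helper reduces to that one formula
(usable scheduler depth = nr_requests minus the reserved tags):

	void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q)
	{
		sbitmap_queue_resize(&q->sched_bitmap_tags,
				     q->nr_requests - q->tag_set->reserved_tags);
	}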
Signed-off-by: John Garry
Reviewed-by: Ming Lei
Reviewed-by: Hannes Reinecke
Link: https://lore.kernel.org/r/1633429419-228500-10-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe
---
 block/blk-mq-sched.c | 3 +--
 block/blk-mq-tag.c   | 6 ++++++
 block/blk-mq-tag.h   | 1 +
 block/blk-mq.c       | 3 +--
 4 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index bdbb6c31b433..6c15f6e98e2e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -575,8 +575,7 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
 			&queue->sched_breserved_tags;
 	}
 
-	sbitmap_queue_resize(&queue->sched_bitmap_tags,
-			     queue->nr_requests - set->reserved_tags);
+	blk_mq_tag_update_sched_shared_sbitmap(queue);
 
 	return 0;
 }
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index ff5caeb82542..55b5a226dcc0 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -634,6 +634,12 @@ void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int s
 	sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
 }
 
+void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q)
+{
+	sbitmap_queue_resize(&q->sched_bitmap_tags,
+			     q->nr_requests - q->tag_set->reserved_tags);
+}
+
 /**
  * blk_mq_unique_tag() - return a tag that is unique queue-wide
  * @rq: request for which to compute a unique tag
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index f0a0ee758a55..a9f5f1824819 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -50,6 +50,7 @@ extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 					unsigned int depth, bool can_grow);
 extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set,
 					     unsigned int size);
+extern void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 38cffe362a29..59bbfc935d87 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3650,8 +3650,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		q->nr_requests = nr;
 		if (blk_mq_is_sbitmap_shared(set->flags)) {
 			if (q->elevator)
-				sbitmap_queue_resize(&q->sched_bitmap_tags,
-						     nr - set->reserved_tags);
+				blk_mq_tag_update_sched_shared_sbitmap(q);
 			else
 				blk_mq_tag_resize_shared_sbitmap(set, nr);
 		}

From 63064be150e4b1ba1e4af594ef5aa81adf21a52d Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:35 +0800
Subject: [PATCH 033/174] blk-mq: Add blk_mq_alloc_map_and_rqs()

Add a function to combine allocating tags and the associated requests,
and factor out common patterns to use this new function.

Some functions only call blk_mq_alloc_map_and_rqs() now, but more
functionality will be added later.
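Condensed from the hunk below, the combined helper pairs the two
allocations and unwinds on failure (sketch only; comments added for
illustration):

	struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
						     unsigned int hctx_idx,
						     unsigned int depth)
	{
		struct blk_mq_tags *tags;

		/* the tag maps: sbitmaps plus the rqs[]/static_rqs[] arrays */
		tags = blk_mq_alloc_rq_map(set, hctx_idx, depth,
					   set->reserved_tags, set->flags);
		if (!tags)
			return NULL;

		/* the static requests; free the map again if this fails */
		if (blk_mq_alloc_rqs(set, tags, hctx_idx, depth)) {
			blk_mq_free_rq_map(tags, set->flags);
			return NULL;
		}

		return tags;
	}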
Also make blk_mq_alloc_rq_map() and blk_mq_alloc_rqs() static since they are only used in blk-mq.c, and finally rename some functions for conciseness and consistency with other function names: - __blk_mq_alloc_map_and_{request -> rqs}() - blk_mq_alloc_{map_and_requests -> set_map_and_rqs}() Suggested-by: Ming Lei Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/1633429419-228500-11-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 15 +++-------- block/blk-mq-tag.c | 9 +------ block/blk-mq.c | 62 +++++++++++++++++++++++++------------------- block/blk-mq.h | 9 ++----- 4 files changed, 42 insertions(+), 53 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 6c15f6e98e2e..d1b56bb9ac64 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -519,21 +519,12 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { - struct blk_mq_tag_set *set = q->tag_set; - int ret; + hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx, + q->nr_requests); - hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, - set->reserved_tags, set->flags); if (!hctx->sched_tags) return -ENOMEM; - - ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); - if (ret) { - blk_mq_free_rq_map(hctx->sched_tags, set->flags); - hctx->sched_tags = NULL; - } - - return ret; + return 0; } /* called in queue's release handler, tagset has gone away */ diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 55b5a226dcc0..db99f1246795 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -592,7 +592,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (tdepth > tags->nr_tags) { struct blk_mq_tag_set *set = hctx->queue->tag_set; struct blk_mq_tags *new; - bool ret; if (!can_grow) return -EINVAL; @@ -604,15 +603,9 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (tdepth > MAX_SCHED_RQ) return -EINVAL; - new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, - tags->nr_reserved_tags, set->flags); + new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth); if (!new) return -ENOMEM; - ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); - if (ret) { - blk_mq_free_rq_map(new, set->flags); - return -ENOMEM; - } blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); blk_mq_free_rq_map(*tagsptr, set->flags); diff --git a/block/blk-mq.c b/block/blk-mq.c index 59bbfc935d87..ab3a831baa9d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2392,11 +2392,11 @@ void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags) blk_mq_free_tags(tags, flags); } -struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, - unsigned int hctx_idx, - unsigned int nr_tags, - unsigned int reserved_tags, - unsigned int flags) +static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, + unsigned int hctx_idx, + unsigned int nr_tags, + unsigned int reserved_tags, + unsigned int flags) { struct blk_mq_tags *tags; int node; @@ -2444,8 +2444,9 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, return 0; } -int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, - unsigned int hctx_idx, unsigned int depth) +static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, + unsigned int hctx_idx, unsigned int depth) { unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; @@ 
-2856,25 +2857,34 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, } } -static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set, - int hctx_idx) +struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, + unsigned int hctx_idx, + unsigned int depth) { - unsigned int flags = set->flags; - int ret = 0; + struct blk_mq_tags *tags; + int ret; - set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, - set->queue_depth, set->reserved_tags, flags); - if (!set->tags[hctx_idx]) - return false; + tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags, + set->flags); + if (!tags) + return NULL; - ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, - set->queue_depth); - if (!ret) - return true; + ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); + if (ret) { + blk_mq_free_rq_map(tags, set->flags); + return NULL; + } - blk_mq_free_rq_map(set->tags[hctx_idx], flags); - set->tags[hctx_idx] = NULL; - return false; + return tags; +} + +static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, + int hctx_idx) +{ + set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, + set->queue_depth); + + return set->tags[hctx_idx]; } static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, @@ -2919,7 +2929,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) hctx_idx = set->map[j].mq_map[i]; /* unmapped hw queue can be remapped after CPU topo changed */ if (!set->tags[hctx_idx] && - !__blk_mq_alloc_map_and_request(set, hctx_idx)) { + !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { /* * If tags initialization fail for some hctx, * that hctx won't be brought online. In this @@ -3352,7 +3362,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) int i; for (i = 0; i < set->nr_hw_queues; i++) { - if (!__blk_mq_alloc_map_and_request(set, i)) + if (!__blk_mq_alloc_map_and_rqs(set, i)) goto out_unwind; cond_resched(); } @@ -3371,7 +3381,7 @@ out_unwind: * may reduce the depth asked for, if memory is tight. set->queue_depth * will be updated to reflect the allocated depth. 
*/ -static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set) +static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set) { unsigned int depth; int err; @@ -3537,7 +3547,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (ret) goto out_free_mq_map; - ret = blk_mq_alloc_map_and_requests(set); + ret = blk_mq_alloc_set_map_and_rqs(set); if (ret) goto out_free_mq_map; diff --git a/block/blk-mq.h b/block/blk-mq.h index d08779f77a26..83585a344568 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -55,13 +55,8 @@ void blk_mq_put_rq_ref(struct request *rq); void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx); void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags); -struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, - unsigned int hctx_idx, - unsigned int nr_tags, - unsigned int reserved_tags, - unsigned int flags); -int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, - unsigned int hctx_idx, unsigned int depth); +struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, + unsigned int hctx_idx, unsigned int depth); /* * Internal helpers for request insertion into sw queues From 645db34e50501aac141713fb47a315e5202ff890 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:36 +0800 Subject: [PATCH 034/174] blk-mq: Refactor and rename blk_mq_free_map_and_{requests->rqs}() Refactor blk_mq_free_map_and_requests() such that it can be used at many sites at which the tag map and rqs are freed. Also rename to blk_mq_free_map_and_rqs(), which is shorter and matches the alloc equivalent. Suggested-by: Ming Lei Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-12-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 3 +-- block/blk-mq.c | 40 ++++++++++++++++++++++++---------------- block/blk-mq.h | 4 +++- 3 files changed, 28 insertions(+), 19 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index db99f1246795..a0ecc6d88f84 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -607,8 +607,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (!new) return -ENOMEM; - blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); - blk_mq_free_rq_map(*tagsptr, set->flags); + blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num); *tagsptr = new; } else { /* diff --git a/block/blk-mq.c b/block/blk-mq.c index ab3a831baa9d..c27591a04c4f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2887,15 +2887,15 @@ static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, return set->tags[hctx_idx]; } -static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, - unsigned int hctx_idx) +void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, + unsigned int hctx_idx) { unsigned int flags = set->flags; - if (set->tags && set->tags[hctx_idx]) { - blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); - blk_mq_free_rq_map(set->tags[hctx_idx], flags); - set->tags[hctx_idx] = NULL; + if (tags) { + blk_mq_free_rqs(set, tags, hctx_idx); + blk_mq_free_rq_map(tags, flags); } } @@ -2976,8 +2976,10 @@ static void blk_mq_map_swqueue(struct request_queue *q) * fallback in case of a new remap fails * allocation */ - if (i && set->tags[i]) - blk_mq_free_map_and_requests(set, i); + if (i && set->tags[i]) { + blk_mq_free_map_and_rqs(set, set->tags[i], i); + set->tags[i] = NULL; + } hctx->tags = NULL; continue; @@ -3273,8 +3275,8 
@@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx = hctxs[j];
 
 		if (hctx) {
-			if (hctx->tags)
-				blk_mq_free_map_and_requests(set, j);
+			blk_mq_free_map_and_rqs(set, set->tags[j], j);
+			set->tags[j] = NULL;
 			blk_mq_exit_hctx(q, set, hctx, j);
 			hctxs[j] = NULL;
 		}
@@ -3370,8 +3372,10 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	return 0;
 
 out_unwind:
-	while (--i >= 0)
-		blk_mq_free_map_and_requests(set, i);
+	while (--i >= 0) {
+		blk_mq_free_map_and_rqs(set, set->tags[i], i);
+		set->tags[i] = NULL;
+	}
 
 	return -ENOMEM;
 }
@@ -3566,8 +3570,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	return 0;
 
 out_free_mq_rq_maps:
-	for (i = 0; i < set->nr_hw_queues; i++)
-		blk_mq_free_map_and_requests(set, i);
+	for (i = 0; i < set->nr_hw_queues; i++) {
+		blk_mq_free_map_and_rqs(set, set->tags[i], i);
+		set->tags[i] = NULL;
+	}
 out_free_mq_map:
 	for (i = 0; i < set->nr_maps; i++) {
 		kfree(set->map[i].mq_map);
@@ -3599,8 +3605,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
 	int i, j;
 
-	for (i = 0; i < set->nr_hw_queues; i++)
-		blk_mq_free_map_and_requests(set, i);
+	for (i = 0; i < set->nr_hw_queues; i++) {
+		blk_mq_free_map_and_rqs(set, set->tags[i], i);
+		set->tags[i] = NULL;
+	}
 
 	if (blk_mq_is_sbitmap_shared(set->flags))
 		blk_mq_exit_shared_sbitmap(set);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 83585a344568..bcb0ca89d37a 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -57,7 +57,9 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 				unsigned int hctx_idx, unsigned int depth);
-
+void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
+			     struct blk_mq_tags *tags,
+			     unsigned int hctx_idx);
 /*
  * Internal helpers for request insertion into sw queues
  */

From e155b0c238b20f0a866f4334d292656665836c8a Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:37 +0800
Subject: [PATCH 035/174] blk-mq: Use shared tags for shared sbitmap support

Currently we use separate sbitmap pairs and active_queues atomic_t for
shared sbitmap support.

However a full set of static requests is used per HW queue, which is
quite wasteful, considering that the total number of requests usable at
any given time across all HW queues is limited by the shared sbitmap
depth.

As such, it is considerably more memory efficient in the case of shared
sbitmap to allocate a set of static rqs per tag set or request queue,
and not per HW queue.

So replace the sbitmap pairs and active_queues atomic_t with shared
tags per tag set and request queue, which will hold a set of shared
static rqs.

Since there is now no valid HW queue index to be passed to the blk_mq_ops
.init and .exit_request callbacks, pass an invalid index token. This
changes the semantics of the APIs, such that the callback would need to
validate the HW queue index before using it. Currently no user of shared
sbitmap actually uses the HW queue index (as would be expected).
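To put rough (purely illustrative) numbers on the waste: a tag set with
32 HW queues and a shared depth of 1024 used to carry 32 * 1024 = 32768
static requests, of which at most 1024 could ever be in flight; after
this change a single set of 1024 is allocated.

The new callback contract can be sketched with a made-up driver (the
foo_* names are hypothetical, not from this series):

	static int foo_init_request(struct blk_mq_tag_set *set,
				    struct request *rq,
				    unsigned int hctx_idx,
				    unsigned int numa_node)
	{
		/*
		 * With shared tags, rqs are set up once per tag set, so
		 * hctx_idx arrives as the invalid token BLK_MQ_NO_HCTX_IDX
		 * and must be validated before indexing per-hctx state.
		 */
		if (hctx_idx != BLK_MQ_NO_HCTX_IDX)
			foo_init_per_hctx_state(set->driver_data, rq, hctx_idx);

		return 0;
	}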
Signed-off-by: John Garry Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/1633429419-228500-13-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 84 +++++++++++++++++----------------- block/blk-mq-tag.c | 63 ++++++++----------------- block/blk-mq-tag.h | 6 +-- block/blk-mq.c | 101 +++++++++++++++++++++-------------------- block/blk-mq.h | 7 ++- include/linux/blk-mq.h | 15 +++--- include/linux/blkdev.h | 3 +- 7 files changed, 126 insertions(+), 153 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index d1b56bb9ac64..428da4949d80 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -519,6 +519,11 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { + if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { + hctx->sched_tags = q->shared_sbitmap_tags; + return 0; + } + hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx, q->nr_requests); @@ -527,61 +532,54 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, return 0; } +static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue) +{ + blk_mq_free_rq_map(queue->shared_sbitmap_tags); + queue->shared_sbitmap_tags = NULL; +} + /* called in queue's release handler, tagset has gone away */ -static void blk_mq_sched_tags_teardown(struct request_queue *q) +static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { if (hctx->sched_tags) { - blk_mq_free_rq_map(hctx->sched_tags, hctx->flags); + if (!blk_mq_is_sbitmap_shared(q->tag_set->flags)) + blk_mq_free_rq_map(hctx->sched_tags); hctx->sched_tags = NULL; } } + + if (blk_mq_is_sbitmap_shared(flags)) + blk_mq_exit_sched_shared_sbitmap(q); } static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) { struct blk_mq_tag_set *set = queue->tag_set; - int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); - struct blk_mq_hw_ctx *hctx; - int ret, i; /* * Set initial depth at max so that we don't need to reallocate for * updating nr_requests. 
*/ - ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags, - &queue->sched_breserved_tags, - MAX_SCHED_RQ, set->reserved_tags, - set->numa_node, alloc_policy); - if (ret) - return ret; - - queue_for_each_hw_ctx(queue, hctx, i) { - hctx->sched_tags->bitmap_tags = - &queue->sched_bitmap_tags; - hctx->sched_tags->breserved_tags = - &queue->sched_breserved_tags; - } + queue->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set, + BLK_MQ_NO_HCTX_IDX, + MAX_SCHED_RQ); + if (!queue->shared_sbitmap_tags) + return -ENOMEM; blk_mq_tag_update_sched_shared_sbitmap(queue); return 0; } -static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue) -{ - sbitmap_queue_free(&queue->sched_bitmap_tags); - sbitmap_queue_free(&queue->sched_breserved_tags); -} - int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) { + unsigned int i, flags = q->tag_set->flags; struct blk_mq_hw_ctx *hctx; struct elevator_queue *eq; - unsigned int i; int ret; if (!e) { @@ -598,21 +596,21 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, BLKDEV_DEFAULT_RQ); + if (blk_mq_is_sbitmap_shared(flags)) { + ret = blk_mq_init_sched_shared_sbitmap(q); + if (ret) + return ret; + } + queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i); if (ret) goto err_free_map_and_rqs; } - if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { - ret = blk_mq_init_sched_shared_sbitmap(q); - if (ret) - goto err_free_map_and_rqs; - } - ret = e->ops.init_sched(q, e); if (ret) - goto err_free_sbitmap; + goto err_free_map_and_rqs; blk_mq_debugfs_register_sched(q); @@ -632,12 +630,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) return 0; -err_free_sbitmap: - if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) - blk_mq_exit_sched_shared_sbitmap(q); err_free_map_and_rqs: blk_mq_sched_free_rqs(q); - blk_mq_sched_tags_teardown(q); + blk_mq_sched_tags_teardown(q, flags); + q->elevator = NULL; return ret; } @@ -651,9 +647,15 @@ void blk_mq_sched_free_rqs(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) { - if (hctx->sched_tags) - blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i); + if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { + blk_mq_free_rqs(q->tag_set, q->shared_sbitmap_tags, + BLK_MQ_NO_HCTX_IDX); + } else { + queue_for_each_hw_ctx(q, hctx, i) { + if (hctx->sched_tags) + blk_mq_free_rqs(q->tag_set, + hctx->sched_tags, i); + } } } @@ -674,8 +676,6 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) blk_mq_debugfs_unregister_sched(q); if (e->type->ops.exit_sched) e->type->ops.exit_sched(e); - blk_mq_sched_tags_teardown(q); - if (blk_mq_is_sbitmap_shared(flags)) - blk_mq_exit_sched_shared_sbitmap(q); + blk_mq_sched_tags_teardown(q, flags); q->elevator = NULL; } diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index a0ecc6d88f84..0e10e8404bf0 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -26,11 +26,10 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { if (blk_mq_is_sbitmap_shared(hctx->flags)) { struct request_queue *q = hctx->queue; - struct blk_mq_tag_set *set = q->tag_set; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) && !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) - atomic_inc(&set->active_queues_shared_sbitmap); + atomic_inc(&hctx->tags->active_queues); } else { if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) @@ 
-57,14 +56,14 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) { struct blk_mq_tags *tags = hctx->tags; - struct request_queue *q = hctx->queue; - struct blk_mq_tag_set *set = q->tag_set; if (blk_mq_is_sbitmap_shared(hctx->flags)) { + struct request_queue *q = hctx->queue; + if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return; - atomic_dec(&set->active_queues_shared_sbitmap); + atomic_dec(&tags->active_queues); } else { if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return; @@ -510,38 +509,10 @@ static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, return 0; } -int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set) -{ - int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); - int i, ret; - - ret = blk_mq_init_bitmaps(&set->__bitmap_tags, &set->__breserved_tags, - set->queue_depth, set->reserved_tags, - set->numa_node, alloc_policy); - if (ret) - return ret; - - for (i = 0; i < set->nr_hw_queues; i++) { - struct blk_mq_tags *tags = set->tags[i]; - - tags->bitmap_tags = &set->__bitmap_tags; - tags->breserved_tags = &set->__breserved_tags; - } - - return 0; -} - -void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set) -{ - sbitmap_queue_free(&set->__bitmap_tags); - sbitmap_queue_free(&set->__breserved_tags); -} - struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, - int node, unsigned int flags) + int node, int alloc_policy) { - int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags); struct blk_mq_tags *tags; if (total_tags > BLK_MQ_TAG_MAX) { @@ -557,9 +528,6 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, tags->nr_reserved_tags = reserved_tags; spin_lock_init(&tags->lock); - if (blk_mq_is_sbitmap_shared(flags)) - return tags; - if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) { kfree(tags); return NULL; @@ -567,12 +535,10 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, return tags; } -void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags) +void blk_mq_free_tags(struct blk_mq_tags *tags) { - if (!blk_mq_is_sbitmap_shared(flags)) { - sbitmap_queue_free(tags->bitmap_tags); - sbitmap_queue_free(tags->breserved_tags); - } + sbitmap_queue_free(tags->bitmap_tags); + sbitmap_queue_free(tags->breserved_tags); kfree(tags); } @@ -603,6 +569,13 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (tdepth > MAX_SCHED_RQ) return -EINVAL; + /* + * Only the sbitmap needs resizing since we allocated the max + * initially. 
+ */ + if (blk_mq_is_sbitmap_shared(set->flags)) + return 0; + new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth); if (!new) return -ENOMEM; @@ -623,12 +596,14 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size) { - sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags); + struct blk_mq_tags *tags = set->shared_sbitmap_tags; + + sbitmap_queue_resize(&tags->__bitmap_tags, size - set->reserved_tags); } void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q) { - sbitmap_queue_resize(&q->sched_bitmap_tags, + sbitmap_queue_resize(q->shared_sbitmap_tags->bitmap_tags, q->nr_requests - q->tag_set->reserved_tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index a9f5f1824819..e43f7589b96c 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -32,16 +32,14 @@ struct blk_mq_tags { extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, - int node, unsigned int flags); -extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags); + int node, int alloc_policy); +extern void blk_mq_free_tags(struct blk_mq_tags *tags); extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, struct sbitmap_queue *breserved_tags, unsigned int queue_depth, unsigned int reserved, int node, int alloc_policy); -extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set); -extern void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set); extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); diff --git a/block/blk-mq.c b/block/blk-mq.c index c27591a04c4f..5537375f6400 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2353,7 +2353,10 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, struct blk_mq_tags *drv_tags; struct page *page; - drv_tags = set->tags[hctx_idx]; + if (blk_mq_is_sbitmap_shared(set->flags)) + drv_tags = set->shared_sbitmap_tags; + else + drv_tags = set->tags[hctx_idx]; if (tags->static_rqs && set->ops->exit_request) { int i; @@ -2382,21 +2385,20 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, } } -void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags) +void blk_mq_free_rq_map(struct blk_mq_tags *tags) { kfree(tags->rqs); tags->rqs = NULL; kfree(tags->static_rqs); tags->static_rqs = NULL; - blk_mq_free_tags(tags, flags); + blk_mq_free_tags(tags); } static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int nr_tags, - unsigned int reserved_tags, - unsigned int flags) + unsigned int reserved_tags) { struct blk_mq_tags *tags; int node; @@ -2405,7 +2407,8 @@ static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, if (node == NUMA_NO_NODE) node = set->numa_node; - tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags); + tags = blk_mq_init_tags(nr_tags, reserved_tags, node, + BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); if (!tags) return NULL; @@ -2413,7 +2416,7 @@ static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node); if (!tags->rqs) { - blk_mq_free_tags(tags, flags); + blk_mq_free_tags(tags); return NULL; } @@ -2422,7 +2425,7 @@ static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, node); if (!tags->static_rqs) { kfree(tags->rqs); - blk_mq_free_tags(tags, 
flags); + blk_mq_free_tags(tags); return NULL; } @@ -2864,14 +2867,13 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags; int ret; - tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags, - set->flags); + tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); if (!tags) return NULL; ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); if (ret) { - blk_mq_free_rq_map(tags, set->flags); + blk_mq_free_rq_map(tags); return NULL; } @@ -2881,6 +2883,12 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, int hctx_idx) { + if (blk_mq_is_sbitmap_shared(set->flags)) { + set->tags[hctx_idx] = set->shared_sbitmap_tags; + + return true; + } + set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, set->queue_depth); @@ -2891,14 +2899,21 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { - unsigned int flags = set->flags; - if (tags) { blk_mq_free_rqs(set, tags, hctx_idx); - blk_mq_free_rq_map(tags, flags); + blk_mq_free_rq_map(tags); } } +static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, + unsigned int hctx_idx) +{ + if (!blk_mq_is_sbitmap_shared(set->flags)) + blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); + + set->tags[hctx_idx] = NULL; +} + static void blk_mq_map_swqueue(struct request_queue *q) { unsigned int i, j, hctx_idx; @@ -2976,10 +2991,8 @@ static void blk_mq_map_swqueue(struct request_queue *q) * fallback in case of a new remap fails * allocation */ - if (i && set->tags[i]) { - blk_mq_free_map_and_rqs(set, set->tags[i], i); - set->tags[i] = NULL; - } + if (i) + __blk_mq_free_map_and_rqs(set, i); hctx->tags = NULL; continue; @@ -3275,8 +3288,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx = hctxs[j]; if (hctx) { - blk_mq_free_map_and_rqs(set, set->tags[j], j); - set->tags[j] = NULL; + __blk_mq_free_map_and_rqs(set, j); blk_mq_exit_hctx(q, set, hctx, j); hctxs[j] = NULL; } @@ -3363,6 +3375,14 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; + if (blk_mq_is_sbitmap_shared(set->flags)) { + set->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set, + BLK_MQ_NO_HCTX_IDX, + set->queue_depth); + if (!set->shared_sbitmap_tags) + return -ENOMEM; + } + for (i = 0; i < set->nr_hw_queues; i++) { if (!__blk_mq_alloc_map_and_rqs(set, i)) goto out_unwind; @@ -3372,9 +3392,12 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) return 0; out_unwind: - while (--i >= 0) { - blk_mq_free_map_and_rqs(set, set->tags[i], i); - set->tags[i] = NULL; + while (--i >= 0) + __blk_mq_free_map_and_rqs(set, i); + + if (blk_mq_is_sbitmap_shared(set->flags)) { + blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags, + BLK_MQ_NO_HCTX_IDX); } return -ENOMEM; @@ -3555,25 +3578,11 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (ret) goto out_free_mq_map; - if (blk_mq_is_sbitmap_shared(set->flags)) { - atomic_set(&set->active_queues_shared_sbitmap, 0); - - if (blk_mq_init_shared_sbitmap(set)) { - ret = -ENOMEM; - goto out_free_mq_rq_maps; - } - } - mutex_init(&set->tag_list_lock); INIT_LIST_HEAD(&set->tag_list); return 0; -out_free_mq_rq_maps: - for (i = 0; i < set->nr_hw_queues; i++) { - blk_mq_free_map_and_rqs(set, set->tags[i], i); - set->tags[i] = NULL; - } out_free_mq_map: for (i = 0; i < set->nr_maps; i++) { kfree(set->map[i].mq_map); @@ -3605,13 +3614,13 @@ void 
blk_mq_free_tag_set(struct blk_mq_tag_set *set) { int i, j; - for (i = 0; i < set->nr_hw_queues; i++) { - blk_mq_free_map_and_rqs(set, set->tags[i], i); - set->tags[i] = NULL; - } + for (i = 0; i < set->nr_hw_queues; i++) + __blk_mq_free_map_and_rqs(set, i); - if (blk_mq_is_sbitmap_shared(set->flags)) - blk_mq_exit_shared_sbitmap(set); + if (blk_mq_is_sbitmap_shared(set->flags)) { + blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags, + BLK_MQ_NO_HCTX_IDX); + } for (j = 0; j < set->nr_maps; j++) { kfree(set->map[j].mq_map); @@ -3649,12 +3658,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) if (hctx->sched_tags) { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, nr, true); - if (blk_mq_is_sbitmap_shared(set->flags)) { - hctx->sched_tags->bitmap_tags = - &q->sched_bitmap_tags; - hctx->sched_tags->breserved_tags = - &q->sched_breserved_tags; - } } else { ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, false); diff --git a/block/blk-mq.h b/block/blk-mq.h index bcb0ca89d37a..8824ae03215a 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -54,7 +54,7 @@ void blk_mq_put_rq_ref(struct request *rq); */ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx); -void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags); +void blk_mq_free_rq_map(struct blk_mq_tags *tags); struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int depth); void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, @@ -330,17 +330,16 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, if (blk_mq_is_sbitmap_shared(hctx->flags)) { struct request_queue *q = hctx->queue; - struct blk_mq_tag_set *set = q->tag_set; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return true; - users = atomic_read(&set->active_queues_shared_sbitmap); } else { if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return true; - users = atomic_read(&hctx->tags->active_queues); } + users = atomic_read(&hctx->tags->active_queues); + if (!users) return true; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 31cc41dfef6b..faa20a19bfcc 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -440,13 +440,11 @@ enum hctx_type { * @flags: Zero or more BLK_MQ_F_* flags. * @driver_data: Pointer to data owned by the block driver that created this * tag set. - * @active_queues_shared_sbitmap: - * number of active request queues per tag set. - * @__bitmap_tags: A shared tags sbitmap, used over all hctx's - * @__breserved_tags: - * A shared reserved tags sbitmap, used over all hctx's * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues * elements. + * @shared_sbitmap_tags: + * Shared sbitmap set of tags. Has @nr_hw_queues elements. If + * set, shared by all @tags. * @tag_list_lock: Serializes tag_list accesses. * @tag_list: List of the request queues that use this tag set. See also * request_queue.tag_set_list. 
@@ -463,12 +461,11 @@ struct blk_mq_tag_set {
 	unsigned int		timeout;
 	unsigned int		flags;
 	void			*driver_data;
-	atomic_t		active_queues_shared_sbitmap;
 
-	struct sbitmap_queue	__bitmap_tags;
-	struct sbitmap_queue	__breserved_tags;
 	struct blk_mq_tags	**tags;
 
+	struct blk_mq_tags	*shared_sbitmap_tags;
+
 	struct mutex		tag_list_lock;
 	struct list_head	tag_list;
 };
@@ -640,6 +637,8 @@ enum {
 	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
+#define BLK_MQ_NO_HCTX_IDX	(-1U)
+
 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
 		struct lock_class_key *lkclass);
 #define blk_mq_alloc_disk(set, queuedata)				\
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0e960d74615e..cf92c13eb80e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -238,8 +238,7 @@ struct request_queue {
 
 	atomic_t		nr_active_requests_shared_sbitmap;
 
-	struct sbitmap_queue	sched_bitmap_tags;
-	struct sbitmap_queue	sched_breserved_tags;
+	struct blk_mq_tags	*shared_sbitmap_tags;
 
 	struct list_head	icq_list;
 #ifdef CONFIG_BLK_CGROUP

From ae0f1a732f4a5db284e2af02c305255734efd19c Mon Sep 17 00:00:00 2001
From: John Garry
Date: Tue, 5 Oct 2021 18:23:38 +0800
Subject: [PATCH 036/174] blk-mq: Stop using pointers for blk_mq_tags bitmap
 tags

Now that we use shared tags for shared sbitmap support, we don't require
the tags sbitmap pointers, so drop them.

This essentially reverts commit 222a5ae03cdd ("blk-mq: Use pointers for
blk_mq_tags bitmap tags").

Function blk_mq_init_bitmap_tags() is also removed, since it would
otherwise be only a wrapper for blk_mq_init_bitmaps().

Reviewed-by: Ming Lei
Reviewed-by: Hannes Reinecke
Signed-off-by: John Garry
Link: https://lore.kernel.org/r/1633429419-228500-14-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe
---
 block/bfq-iosched.c    |  4 +--
 block/blk-mq-debugfs.c |  8 +++---
 block/blk-mq-tag.c     | 56 +++++++++++++++---------------------------
 block/blk-mq-tag.h     |  7 ++----
 block/blk-mq.c         |  8 +++---
 block/kyber-iosched.c  |  4 +--
 block/mq-deadline.c    |  2 +-
 7 files changed, 35 insertions(+), 54 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index b5ef23f63d51..fec18118dc30 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6884,8 +6884,8 @@ static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct blk_mq_tags *tags = hctx->sched_tags;
 	unsigned int min_shallow;
 
-	min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags);
-	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow);
+	min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
 }
 
 static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 3b38d15723de..3daea160d670 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -453,11 +453,11 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
 		   atomic_read(&tags->active_queues));
 
 	seq_puts(m, "\nbitmap_tags:\n");
-	sbitmap_queue_show(tags->bitmap_tags, m);
+	sbitmap_queue_show(&tags->bitmap_tags, m);
 
 	if (tags->nr_reserved_tags) {
 		seq_puts(m, "\nbreserved_tags:\n");
-		sbitmap_queue_show(tags->breserved_tags, m);
+		sbitmap_queue_show(&tags->breserved_tags, m);
 	}
 }
 
@@ -488,7 +488,7 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
 	if (res)
 		goto out;
 	if (hctx->tags)
-		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
+		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
mutex_unlock(&q->sysfs_lock); out: @@ -522,7 +522,7 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m) if (res) goto out; if (hctx->sched_tags) - sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m); + sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m); mutex_unlock(&q->sysfs_lock); out: diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 0e10e8404bf0..211068a5f676 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -44,9 +44,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) */ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) { - sbitmap_queue_wake_all(tags->bitmap_tags); + sbitmap_queue_wake_all(&tags->bitmap_tags); if (include_reserve) - sbitmap_queue_wake_all(tags->breserved_tags); + sbitmap_queue_wake_all(&tags->breserved_tags); } /* @@ -100,10 +100,10 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) WARN_ON_ONCE(1); return BLK_MQ_NO_TAG; } - bt = tags->breserved_tags; + bt = &tags->breserved_tags; tag_offset = 0; } else { - bt = tags->bitmap_tags; + bt = &tags->bitmap_tags; tag_offset = tags->nr_reserved_tags; } @@ -149,9 +149,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) data->ctx); tags = blk_mq_tags_from_data(data); if (data->flags & BLK_MQ_REQ_RESERVED) - bt = tags->breserved_tags; + bt = &tags->breserved_tags; else - bt = tags->bitmap_tags; + bt = &tags->bitmap_tags; /* * If destination hw queue is changed, fake wake up on @@ -185,10 +185,10 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, const int real_tag = tag - tags->nr_reserved_tags; BUG_ON(real_tag >= tags->nr_tags); - sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu); + sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu); } else { BUG_ON(tag >= tags->nr_reserved_tags); - sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu); + sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu); } } @@ -339,9 +339,9 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags, WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED); if (tags->nr_reserved_tags) - bt_tags_for_each(tags, tags->breserved_tags, fn, priv, + bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, flags | BT_TAG_ITER_RESERVED); - bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags); + bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags); } /** @@ -458,8 +458,8 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, continue; if (tags->nr_reserved_tags) - bt_for_each(hctx, tags->breserved_tags, fn, priv, true); - bt_for_each(hctx, tags->bitmap_tags, fn, priv, false); + bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); + bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); } blk_queue_exit(q); } @@ -491,24 +491,6 @@ free_bitmap_tags: return -ENOMEM; } -static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, - int node, int alloc_policy) -{ - int ret; - - ret = blk_mq_init_bitmaps(&tags->__bitmap_tags, - &tags->__breserved_tags, - tags->nr_tags, tags->nr_reserved_tags, - node, alloc_policy); - if (ret) - return ret; - - tags->bitmap_tags = &tags->__bitmap_tags; - tags->breserved_tags = &tags->__breserved_tags; - - return 0; -} - struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, int node, int alloc_policy) @@ -528,7 +510,9 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, tags->nr_reserved_tags = reserved_tags; spin_lock_init(&tags->lock); - if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) { + if 
(blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags, + total_tags, reserved_tags, node, + alloc_policy) < 0) { kfree(tags); return NULL; } @@ -537,8 +521,8 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, void blk_mq_free_tags(struct blk_mq_tags *tags) { - sbitmap_queue_free(tags->bitmap_tags); - sbitmap_queue_free(tags->breserved_tags); + sbitmap_queue_free(&tags->bitmap_tags); + sbitmap_queue_free(&tags->breserved_tags); kfree(tags); } @@ -587,7 +571,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, * Don't need (or can't) update reserved tags here, they * remain static and should never need resizing. */ - sbitmap_queue_resize(tags->bitmap_tags, + sbitmap_queue_resize(&tags->bitmap_tags, tdepth - tags->nr_reserved_tags); } @@ -598,12 +582,12 @@ void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int s { struct blk_mq_tags *tags = set->shared_sbitmap_tags; - sbitmap_queue_resize(&tags->__bitmap_tags, size - set->reserved_tags); + sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags); } void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q) { - sbitmap_queue_resize(q->shared_sbitmap_tags->bitmap_tags, + sbitmap_queue_resize(&q->shared_sbitmap_tags->bitmap_tags, q->nr_requests - q->tag_set->reserved_tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index e43f7589b96c..1052d69147ba 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -13,11 +13,8 @@ struct blk_mq_tags { atomic_t active_queues; - struct sbitmap_queue *bitmap_tags; - struct sbitmap_queue *breserved_tags; - - struct sbitmap_queue __bitmap_tags; - struct sbitmap_queue __breserved_tags; + struct sbitmap_queue bitmap_tags; + struct sbitmap_queue breserved_tags; struct request **rqs; struct request **static_rqs; diff --git a/block/blk-mq.c b/block/blk-mq.c index 5537375f6400..e026e5ebe2c9 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1071,14 +1071,14 @@ static inline unsigned int queued_to_index(unsigned int queued) static bool __blk_mq_get_driver_tag(struct request *rq) { - struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags; + struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; int tag; blk_mq_tag_busy(rq->mq_hctx); if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { - bt = rq->mq_hctx->tags->breserved_tags; + bt = &rq->mq_hctx->tags->breserved_tags; tag_offset = 0; } else { if (!hctx_may_queue(rq->mq_hctx, bt)) @@ -1121,7 +1121,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, struct sbitmap_queue *sbq; list_del_init(&wait->entry); - sbq = hctx->tags->bitmap_tags; + sbq = &hctx->tags->bitmap_tags; atomic_dec(&sbq->ws_active); } spin_unlock(&hctx->dispatch_wait_lock); @@ -1139,7 +1139,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct sbitmap_queue *sbq = hctx->tags->bitmap_tags; + struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; struct wait_queue_head *wq; wait_queue_entry_t *wait; bool ret; diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 53dafc4bcd72..fdd74a4df56f 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -453,11 +453,11 @@ static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx) { struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; struct blk_mq_tags *tags = hctx->sched_tags; - unsigned int shift = 
tags->bitmap_tags->sb.shift; + unsigned int shift = tags->bitmap_tags.sb.shift; kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U; - sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth); + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth); } static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 2001ac186f5c..85d919bf60c7 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -567,7 +567,7 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) dd->async_depth = max(1UL, 3 * q->nr_requests / 4); - sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth); + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth); } /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */ From 079a2e3e862548087041a1873bbffceb41a72a33 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:39 +0800 Subject: [PATCH 037/174] blk-mq: Change shared sbitmap naming to shared tags Now that shared sbitmap support really means shared tags, rename symbols to match that. Signed-off-by: John Garry Link: https://lore.kernel.org/r/1633429419-228500-15-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-mq-sched.c | 32 ++++++++++++++++---------------- block/blk-mq-tag.c | 18 +++++++++--------- block/blk-mq-tag.h | 4 ++-- block/blk-mq.c | 32 ++++++++++++++++---------------- block/blk-mq.h | 16 ++++++++-------- block/elevator.c | 2 +- include/linux/blk-mq.h | 8 ++++---- include/linux/blkdev.h | 4 ++-- 9 files changed, 59 insertions(+), 59 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 0ed17547bfed..b08aed26e6bb 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -554,7 +554,7 @@ struct request_queue *blk_alloc_queue(int node_id) q->node = node_id; - atomic_set(&q->nr_active_requests_shared_sbitmap, 0); + atomic_set(&q->nr_active_requests_shared_tags, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); INIT_WORK(&q->timeout_work, blk_timeout_work); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 428da4949d80..27312da7d638 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -519,8 +519,8 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { - if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { - hctx->sched_tags = q->shared_sbitmap_tags; + if (blk_mq_is_shared_tags(q->tag_set->flags)) { + hctx->sched_tags = q->sched_shared_tags; return 0; } @@ -532,10 +532,10 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, return 0; } -static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue) +static void blk_mq_exit_sched_shared_tags(struct request_queue *queue) { - blk_mq_free_rq_map(queue->shared_sbitmap_tags); - queue->shared_sbitmap_tags = NULL; + blk_mq_free_rq_map(queue->sched_shared_tags); + queue->sched_shared_tags = NULL; } /* called in queue's release handler, tagset has gone away */ @@ -546,17 +546,17 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int fla queue_for_each_hw_ctx(q, hctx, i) { if (hctx->sched_tags) { - if (!blk_mq_is_sbitmap_shared(q->tag_set->flags)) + if (!blk_mq_is_shared_tags(q->tag_set->flags)) blk_mq_free_rq_map(hctx->sched_tags); hctx->sched_tags = NULL; } } - if (blk_mq_is_sbitmap_shared(flags)) - blk_mq_exit_sched_shared_sbitmap(q); + if (blk_mq_is_shared_tags(flags)) + 
blk_mq_exit_sched_shared_tags(q); } -static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) +static int blk_mq_init_sched_shared_tags(struct request_queue *queue) { struct blk_mq_tag_set *set = queue->tag_set; @@ -564,13 +564,13 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) * Set initial depth at max so that we don't need to reallocate for * updating nr_requests. */ - queue->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set, + queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX, MAX_SCHED_RQ); - if (!queue->shared_sbitmap_tags) + if (!queue->sched_shared_tags) return -ENOMEM; - blk_mq_tag_update_sched_shared_sbitmap(queue); + blk_mq_tag_update_sched_shared_tags(queue); return 0; } @@ -596,8 +596,8 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, BLKDEV_DEFAULT_RQ); - if (blk_mq_is_sbitmap_shared(flags)) { - ret = blk_mq_init_sched_shared_sbitmap(q); + if (blk_mq_is_shared_tags(flags)) { + ret = blk_mq_init_sched_shared_tags(q); if (ret) return ret; } @@ -647,8 +647,8 @@ void blk_mq_sched_free_rqs(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { - blk_mq_free_rqs(q->tag_set, q->shared_sbitmap_tags, + if (blk_mq_is_shared_tags(q->tag_set->flags)) { + blk_mq_free_rqs(q->tag_set, q->sched_shared_tags, BLK_MQ_NO_HCTX_IDX); } else { queue_for_each_hw_ctx(q, hctx, i) { diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 211068a5f676..72a2724a4eee 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -24,7 +24,7 @@ */ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { - if (blk_mq_is_sbitmap_shared(hctx->flags)) { + if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) && @@ -57,19 +57,19 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) { struct blk_mq_tags *tags = hctx->tags; - if (blk_mq_is_sbitmap_shared(hctx->flags)) { + if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue; if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return; - atomic_dec(&tags->active_queues); } else { if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return; - atomic_dec(&tags->active_queues); } + atomic_dec(&tags->active_queues); + blk_mq_tag_wakeup_all(tags, false); } @@ -557,7 +557,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, * Only the sbitmap needs resizing since we allocated the max * initially. 
*/ - if (blk_mq_is_sbitmap_shared(set->flags)) + if (blk_mq_is_shared_tags(set->flags)) return 0; new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth); @@ -578,16 +578,16 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, return 0; } -void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size) +void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size) { - struct blk_mq_tags *tags = set->shared_sbitmap_tags; + struct blk_mq_tags *tags = set->shared_tags; sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags); } -void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q) +void blk_mq_tag_update_sched_shared_tags(struct request_queue *q) { - sbitmap_queue_resize(&q->shared_sbitmap_tags->bitmap_tags, + sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags, q->nr_requests - q->tag_set->reserved_tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 1052d69147ba..d8ce89fa1686 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -43,9 +43,9 @@ extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tags, unsigned int depth, bool can_grow); -extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, +extern void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size); -extern void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q); +extern void blk_mq_tag_update_sched_shared_tags(struct request_queue *q); extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, diff --git a/block/blk-mq.c b/block/blk-mq.c index e026e5ebe2c9..f7428e11b109 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2235,7 +2235,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) blk_insert_flush(rq); blk_mq_run_hw_queue(data.hctx, true); } else if (plug && (q->nr_hw_queues == 1 || - blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) || + blk_mq_is_shared_tags(rq->mq_hctx->flags) || q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { /* * Use plugging if we have a ->commit_rqs() hook as well, as @@ -2353,8 +2353,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, struct blk_mq_tags *drv_tags; struct page *page; - if (blk_mq_is_sbitmap_shared(set->flags)) - drv_tags = set->shared_sbitmap_tags; + if (blk_mq_is_shared_tags(set->flags)) + drv_tags = set->shared_tags; else drv_tags = set->tags[hctx_idx]; @@ -2883,8 +2883,8 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, int hctx_idx) { - if (blk_mq_is_sbitmap_shared(set->flags)) { - set->tags[hctx_idx] = set->shared_sbitmap_tags; + if (blk_mq_is_shared_tags(set->flags)) { + set->tags[hctx_idx] = set->shared_tags; return true; } @@ -2908,7 +2908,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx) { - if (!blk_mq_is_sbitmap_shared(set->flags)) + if (!blk_mq_is_shared_tags(set->flags)) blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); set->tags[hctx_idx] = NULL; @@ -3375,11 +3375,11 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; - if (blk_mq_is_sbitmap_shared(set->flags)) { - set->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set, + if (blk_mq_is_shared_tags(set->flags)) { + set->shared_tags = 
blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX, set->queue_depth); - if (!set->shared_sbitmap_tags) + if (!set->shared_tags) return -ENOMEM; } @@ -3395,8 +3395,8 @@ out_unwind: while (--i >= 0) __blk_mq_free_map_and_rqs(set, i); - if (blk_mq_is_sbitmap_shared(set->flags)) { - blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags, + if (blk_mq_is_shared_tags(set->flags)) { + blk_mq_free_map_and_rqs(set, set->shared_tags, BLK_MQ_NO_HCTX_IDX); } @@ -3617,8 +3617,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) for (i = 0; i < set->nr_hw_queues; i++) __blk_mq_free_map_and_rqs(set, i); - if (blk_mq_is_sbitmap_shared(set->flags)) { - blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags, + if (blk_mq_is_shared_tags(set->flags)) { + blk_mq_free_map_and_rqs(set, set->shared_tags, BLK_MQ_NO_HCTX_IDX); } @@ -3669,11 +3669,11 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) } if (!ret) { q->nr_requests = nr; - if (blk_mq_is_sbitmap_shared(set->flags)) { + if (blk_mq_is_shared_tags(set->flags)) { if (q->elevator) - blk_mq_tag_update_sched_shared_sbitmap(q); + blk_mq_tag_update_sched_shared_tags(q); else - blk_mq_tag_resize_shared_sbitmap(set, nr); + blk_mq_tag_resize_shared_tags(set, nr); } } diff --git a/block/blk-mq.h b/block/blk-mq.h index 8824ae03215a..171e8cdcff54 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -157,7 +157,7 @@ struct blk_mq_alloc_data { struct blk_mq_hw_ctx *hctx; }; -static inline bool blk_mq_is_sbitmap_shared(unsigned int flags) +static inline bool blk_mq_is_shared_tags(unsigned int flags) { return flags & BLK_MQ_F_TAG_HCTX_SHARED; } @@ -217,24 +217,24 @@ static inline int blk_mq_get_rq_budget_token(struct request *rq) static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx) { - if (blk_mq_is_sbitmap_shared(hctx->flags)) - atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap); + if (blk_mq_is_shared_tags(hctx->flags)) + atomic_inc(&hctx->queue->nr_active_requests_shared_tags); else atomic_inc(&hctx->nr_active); } static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx) { - if (blk_mq_is_sbitmap_shared(hctx->flags)) - atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap); + if (blk_mq_is_shared_tags(hctx->flags)) + atomic_dec(&hctx->queue->nr_active_requests_shared_tags); else atomic_dec(&hctx->nr_active); } static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx) { - if (blk_mq_is_sbitmap_shared(hctx->flags)) - return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap); + if (blk_mq_is_shared_tags(hctx->flags)) + return atomic_read(&hctx->queue->nr_active_requests_shared_tags); return atomic_read(&hctx->nr_active); } static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, @@ -328,7 +328,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, if (bt->sb.depth == 1) return true; - if (blk_mq_is_sbitmap_shared(hctx->flags)) { + if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) diff --git a/block/elevator.c b/block/elevator.c index 57be09cd7f6d..1f39f6e8ebb9 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -637,7 +637,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q) return NULL; if (q->nr_hw_queues != 1 && - !blk_mq_is_sbitmap_shared(q->tag_set->flags)) + !blk_mq_is_shared_tags(q->tag_set->flags)) return NULL; return elevator_get(q, "mq-deadline", false); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 
faa20a19bfcc..75d75657df21 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -442,9 +442,9 @@ enum hctx_type { * tag set. * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues * elements. - * @shared_sbitmap_tags: - * Shared sbitmap set of tags. Has @nr_hw_queues elements. If - * set, shared by all @tags. + * @shared_tags: + * Shared set of tags. Has @nr_hw_queues elements. If set, + * shared by all @tags. * @tag_list_lock: Serializes tag_list accesses. * @tag_list: List of the request queues that use this tag set. See also * request_queue.tag_set_list. @@ -464,7 +464,7 @@ struct blk_mq_tag_set { struct blk_mq_tags **tags; - struct blk_mq_tags *shared_sbitmap_tags; + struct blk_mq_tags *shared_tags; struct mutex tag_list_lock; struct list_head tag_list; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cf92c13eb80e..b19172db7eef 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -236,9 +236,9 @@ struct request_queue { struct timer_list timeout; struct work_struct timeout_work; - atomic_t nr_active_requests_shared_sbitmap; + atomic_t nr_active_requests_shared_tags; - struct blk_mq_tags *shared_sbitmap_tags; + struct blk_mq_tags *sched_shared_tags; struct list_head icq_list; #ifdef CONFIG_BLK_CGROUP From a7b36ee6ba299ffa5c3b36af187b4d0fb32a557c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 5 Oct 2021 09:11:56 -0600 Subject: [PATCH 038/174] block: move blk-throtl fast path inline Even if no policies are defined, we spend ~2% of the total IO time checking. Move the fast path inline. Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 1 + block/blk-core.c | 1 + block/blk-merge.c | 1 + block/blk-sysfs.c | 1 + block/blk-throttle.c | 161 +------------------------------------- block/blk-throttle.h | 182 +++++++++++++++++++++++++++++++++++++++++++ block/blk.h | 16 ---- 7 files changed, 189 insertions(+), 174 deletions(-) create mode 100644 block/blk-throttle.h diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 9a1c5839dd46..166104d92b5e 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -32,6 +32,7 @@ #include #include "blk.h" #include "blk-ioprio.h" +#include "blk-throttle.h" /* * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation. 
diff --git a/block/blk-core.c b/block/blk-core.c index b08aed26e6bb..3b5ee3f7cc1e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -50,6 +50,7 @@ #include "blk-mq.h" #include "blk-mq-sched.h" #include "blk-pm.h" +#include "blk-throttle.h" struct dentry *blk_debugfs_root; diff --git a/block/blk-merge.c b/block/blk-merge.c index 5b4f23014df8..bf7aedaad8f8 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -13,6 +13,7 @@ #include "blk.h" #include "blk-rq-qos.h" +#include "blk-throttle.h" static inline bool bio_will_gap(struct request_queue *q, struct request *prev_rq, struct bio *prev, struct bio *next) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 614d9d47de36..da883efcba33 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -17,6 +17,7 @@ #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-wbt.h" +#include "blk-throttle.h" struct queue_sysfs_entry { struct attribute attr; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 7c4e7993ba97..8cefd14deed5 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -13,6 +13,7 @@ #include #include "blk.h" #include "blk-cgroup-rwstat.h" +#include "blk-throttle.h" /* Max dispatch from a group in 1 round */ #define THROTL_GRP_QUANTUM 8 @@ -37,60 +38,9 @@ */ #define LATENCY_FILTERED_HD (1000L) /* 1ms */ -static struct blkcg_policy blkcg_policy_throtl; - /* A workqueue to queue throttle related work */ static struct workqueue_struct *kthrotld_workqueue; -/* - * To implement hierarchical throttling, throtl_grps form a tree and bios - * are dispatched upwards level by level until they reach the top and get - * issued. When dispatching bios from the children and local group at each - * level, if the bios are dispatched into a single bio_list, there's a risk - * of a local or child group which can queue many bios at once filling up - * the list starving others. - * - * To avoid such starvation, dispatched bios are queued separately - * according to where they came from. When they are again dispatched to - * the parent, they're popped in round-robin order so that no single source - * hogs the dispatch window. - * - * throtl_qnode is used to keep the queued bios separated by their sources. - * Bios are queued to throtl_qnode which in turn is queued to - * throtl_service_queue and then dispatched in round-robin order. - * - * It's also used to track the reference counts on blkg's. A qnode always - * belongs to a throtl_grp and gets queued on itself or the parent, so - * incrementing the reference of the associated throtl_grp when a qnode is - * queued and decrementing when dequeued is enough to keep the whole blkg - * tree pinned while bios are in flight. - */ -struct throtl_qnode { - struct list_head node; /* service_queue->queued[] */ - struct bio_list bios; /* queued bios */ - struct throtl_grp *tg; /* tg this qnode belongs to */ -}; - -struct throtl_service_queue { - struct throtl_service_queue *parent_sq; /* the parent service_queue */ - - /* - * Bios queued directly to this service_queue or dispatched from - * children throtl_grp's. - */ - struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ - unsigned int nr_queued[2]; /* number of queued bios */ - - /* - * RB tree of active children throtl_grp's, which are sorted by - * their ->disptime. 
- */ - struct rb_root_cached pending_tree; /* RB tree of active tgs */ - unsigned int nr_pending; /* # queued in the tree */ - unsigned long first_pending_disptime; /* disptime of the first tg */ - struct timer_list pending_timer; /* fires on first_pending_disptime */ -}; - enum tg_state_flags { THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */ THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */ @@ -98,93 +48,6 @@ enum tg_state_flags { #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) -enum { - LIMIT_LOW, - LIMIT_MAX, - LIMIT_CNT, -}; - -struct throtl_grp { - /* must be the first member */ - struct blkg_policy_data pd; - - /* active throtl group service_queue member */ - struct rb_node rb_node; - - /* throtl_data this group belongs to */ - struct throtl_data *td; - - /* this group's service queue */ - struct throtl_service_queue service_queue; - - /* - * qnode_on_self is used when bios are directly queued to this - * throtl_grp so that local bios compete fairly with bios - * dispatched from children. qnode_on_parent is used when bios are - * dispatched from this throtl_grp into its parent and will compete - * with the sibling qnode_on_parents and the parent's - * qnode_on_self. - */ - struct throtl_qnode qnode_on_self[2]; - struct throtl_qnode qnode_on_parent[2]; - - /* - * Dispatch time in jiffies. This is the estimated time when group - * will unthrottle and is ready to dispatch more bio. It is used as - * key to sort active groups in service tree. - */ - unsigned long disptime; - - unsigned int flags; - - /* are there any throtl rules between this group and td? */ - bool has_rules[2]; - - /* internally used bytes per second rate limits */ - uint64_t bps[2][LIMIT_CNT]; - /* user configured bps limits */ - uint64_t bps_conf[2][LIMIT_CNT]; - - /* internally used IOPS limits */ - unsigned int iops[2][LIMIT_CNT]; - /* user configured IOPS limits */ - unsigned int iops_conf[2][LIMIT_CNT]; - - /* Number of bytes dispatched in current slice */ - uint64_t bytes_disp[2]; - /* Number of bio's dispatched in current slice */ - unsigned int io_disp[2]; - - unsigned long last_low_overflow_time[2]; - - uint64_t last_bytes_disp[2]; - unsigned int last_io_disp[2]; - - unsigned long last_check_time; - - unsigned long latency_target; /* us */ - unsigned long latency_target_conf; /* us */ - /* When did we start a new slice */ - unsigned long slice_start[2]; - unsigned long slice_end[2]; - - unsigned long last_finish_time; /* ns / 1024 */ - unsigned long checked_last_finish_time; /* ns / 1024 */ - unsigned long avg_idletime; /* ns / 1024 */ - unsigned long idletime_threshold; /* us */ - unsigned long idletime_threshold_conf; /* us */ - - unsigned int bio_cnt; /* total bios */ - unsigned int bad_bio_cnt; /* bios exceeding latency threshold */ - unsigned long bio_cnt_reset_time; - - atomic_t io_split_cnt[2]; - atomic_t last_io_split_cnt[2]; - - struct blkg_rwstat stat_bytes; - struct blkg_rwstat stat_ios; -}; - /* We measure latency for request size from <= 4k to >= 1M */ #define LATENCY_BUCKET_SIZE 9 @@ -231,16 +94,6 @@ struct throtl_data static void throtl_pending_timer_fn(struct timer_list *t); -static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd) -{ - return pd ? 
container_of(pd, struct throtl_grp, pd) : NULL; -} - -static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) -{ - return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl)); -} - static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) { return pd_to_blkg(&tg->pd); @@ -1794,7 +1647,7 @@ static void throtl_shutdown_wq(struct request_queue *q) cancel_work_sync(&td->dispatch_work); } -static struct blkcg_policy blkcg_policy_throtl = { +struct blkcg_policy blkcg_policy_throtl = { .dfl_cftypes = throtl_files, .legacy_cftypes = throtl_legacy_files, @@ -2208,7 +2061,7 @@ void blk_throtl_charge_bio_split(struct bio *bio) } while (parent); } -bool blk_throtl_bio(struct bio *bio) +bool __blk_throtl_bio(struct bio *bio) { struct request_queue *q = bio->bi_bdev->bd_disk->queue; struct blkcg_gq *blkg = bio->bi_blkg; @@ -2221,19 +2074,12 @@ bool blk_throtl_bio(struct bio *bio) rcu_read_lock(); - /* see throtl_charge_bio() */ - if (bio_flagged(bio, BIO_THROTTLED)) - goto out; - if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) { blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size); blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); } - if (!tg->has_rules[rw]) - goto out; - spin_lock_irq(&q->queue_lock); throtl_update_latency_buckets(td); @@ -2317,7 +2163,6 @@ again: out_unlock: spin_unlock_irq(&q->queue_lock); -out: bio_set_flag(bio, BIO_THROTTLED); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW diff --git a/block/blk-throttle.h b/block/blk-throttle.h new file mode 100644 index 000000000000..175f03abd9e4 --- /dev/null +++ b/block/blk-throttle.h @@ -0,0 +1,182 @@ +#ifndef BLK_THROTTLE_H +#define BLK_THROTTLE_H + +#include "blk-cgroup-rwstat.h" + +/* + * To implement hierarchical throttling, throtl_grps form a tree and bios + * are dispatched upwards level by level until they reach the top and get + * issued. When dispatching bios from the children and local group at each + * level, if the bios are dispatched into a single bio_list, there's a risk + * of a local or child group which can queue many bios at once filling up + * the list starving others. + * + * To avoid such starvation, dispatched bios are queued separately + * according to where they came from. When they are again dispatched to + * the parent, they're popped in round-robin order so that no single source + * hogs the dispatch window. + * + * throtl_qnode is used to keep the queued bios separated by their sources. + * Bios are queued to throtl_qnode which in turn is queued to + * throtl_service_queue and then dispatched in round-robin order. + * + * It's also used to track the reference counts on blkg's. A qnode always + * belongs to a throtl_grp and gets queued on itself or the parent, so + * incrementing the reference of the associated throtl_grp when a qnode is + * queued and decrementing when dequeued is enough to keep the whole blkg + * tree pinned while bios are in flight. + */ +struct throtl_qnode { + struct list_head node; /* service_queue->queued[] */ + struct bio_list bios; /* queued bios */ + struct throtl_grp *tg; /* tg this qnode belongs to */ +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; /* the parent service_queue */ + + /* + * Bios queued directly to this service_queue or dispatched from + * children throtl_grp's. + */ + struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ + unsigned int nr_queued[2]; /* number of queued bios */ + + /* + * RB tree of active children throtl_grp's, which are sorted by + * their ->disptime. 
+ */ + struct rb_root_cached pending_tree; /* RB tree of active tgs */ + unsigned int nr_pending; /* # queued in the tree */ + unsigned long first_pending_disptime; /* disptime of the first tg */ + struct timer_list pending_timer; /* fires on first_pending_disptime */ +}; + +enum { + LIMIT_LOW, + LIMIT_MAX, + LIMIT_CNT, +}; + +struct throtl_grp { + /* must be the first member */ + struct blkg_policy_data pd; + + /* active throtl group service_queue member */ + struct rb_node rb_node; + + /* throtl_data this group belongs to */ + struct throtl_data *td; + + /* this group's service queue */ + struct throtl_service_queue service_queue; + + /* + * qnode_on_self is used when bios are directly queued to this + * throtl_grp so that local bios compete fairly with bios + * dispatched from children. qnode_on_parent is used when bios are + * dispatched from this throtl_grp into its parent and will compete + * with the sibling qnode_on_parents and the parent's + * qnode_on_self. + */ + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + + /* + * Dispatch time in jiffies. This is the estimated time when group + * will unthrottle and is ready to dispatch more bio. It is used as + * key to sort active groups in service tree. + */ + unsigned long disptime; + + unsigned int flags; + + /* are there any throtl rules between this group and td? */ + bool has_rules[2]; + + /* internally used bytes per second rate limits */ + uint64_t bps[2][LIMIT_CNT]; + /* user configured bps limits */ + uint64_t bps_conf[2][LIMIT_CNT]; + + /* internally used IOPS limits */ + unsigned int iops[2][LIMIT_CNT]; + /* user configured IOPS limits */ + unsigned int iops_conf[2][LIMIT_CNT]; + + /* Number of bytes dispatched in current slice */ + uint64_t bytes_disp[2]; + /* Number of bio's dispatched in current slice */ + unsigned int io_disp[2]; + + unsigned long last_low_overflow_time[2]; + + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + + unsigned long last_check_time; + + unsigned long latency_target; /* us */ + unsigned long latency_target_conf; /* us */ + /* When did we start a new slice */ + unsigned long slice_start[2]; + unsigned long slice_end[2]; + + unsigned long last_finish_time; /* ns / 1024 */ + unsigned long checked_last_finish_time; /* ns / 1024 */ + unsigned long avg_idletime; /* ns / 1024 */ + unsigned long idletime_threshold; /* us */ + unsigned long idletime_threshold_conf; /* us */ + + unsigned int bio_cnt; /* total bios */ + unsigned int bad_bio_cnt; /* bios exceeding latency threshold */ + unsigned long bio_cnt_reset_time; + + atomic_t io_split_cnt[2]; + atomic_t last_io_split_cnt[2]; + + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; +}; + +extern struct blkcg_policy blkcg_policy_throtl; + +static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd) +{ + return pd ? 
container_of(pd, struct throtl_grp, pd) : NULL;
+}
+
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
+{
+	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
+}
+
+/*
+ * Internal throttling interface
+ */
+#ifndef CONFIG_BLK_DEV_THROTTLING
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline void blk_throtl_exit(struct request_queue *q) { }
+static inline void blk_throtl_register_queue(struct request_queue *q) { }
+static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
+static inline bool blk_throtl_bio(struct bio *bio) { return false; }
+#else /* CONFIG_BLK_DEV_THROTTLING */
+int blk_throtl_init(struct request_queue *q);
+void blk_throtl_exit(struct request_queue *q);
+void blk_throtl_register_queue(struct request_queue *q);
+void blk_throtl_charge_bio_split(struct bio *bio);
+bool __blk_throtl_bio(struct bio *bio);
+static inline bool blk_throtl_bio(struct bio *bio)
+{
+	struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
+
+	if (bio_flagged(bio, BIO_THROTTLED))
+		return false;
+	if (!tg->has_rules[bio_data_dir(bio)])
+		return false;
+
+	return __blk_throtl_bio(bio);
+}
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
+#endif
diff --git a/block/blk.h b/block/blk.h
index 8cdee4844d8a..7aaccbc26db4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -325,22 +325,6 @@ void ioc_clear_queue(struct request_queue *q);
 
 int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
 
-/*
- * Internal throttling interface
- */
-#ifdef CONFIG_BLK_DEV_THROTTLING
-extern int blk_throtl_init(struct request_queue *q);
-extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_register_queue(struct request_queue *q);
-extern void blk_throtl_charge_bio_split(struct bio *bio);
-bool blk_throtl_bio(struct bio *bio);
-#else /* CONFIG_BLK_DEV_THROTTLING */
-static inline int blk_throtl_init(struct request_queue *q) { return 0; }
-static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_register_queue(struct request_queue *q) { }
-static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
-static inline bool blk_throtl_bio(struct bio *bio) { return false; }
-#endif /* CONFIG_BLK_DEV_THROTTLING */
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
 extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,

From 0006707723233cb2a9a23ca19fc3d0864835704c Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 5 Oct 2021 09:23:59 -0600
Subject: [PATCH 039/174] block: inherit request start time from bio for
 BLK_CGROUP

Doing high IOPS testing with blk-cgroups enabled spends ~15-20% of
the time just doing ktime_get_ns() -> readtsc. We essentially read
and set the start time twice, once for the bio and then again when
that bio is mapped to a request.

Given that the time between the two is very short, inherit the bio
start time instead of reading it again. This cuts 1/3rd of the
timekeeping overhead.
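To make the pattern concrete, a minimal sketch; rq_start_time() is a
hypothetical helper name used only for illustration, while the actual
change below open-codes the same logic in blk_mq_start_request():

static u64 rq_start_time(struct request *rq)
{
#ifdef CONFIG_BLK_CGROUP
	/* reuse the timestamp captured when the bio was issued */
	if (rq->bio)
		return bio_issue_time(&rq->bio->bi_issue);
#endif
	/* no bio attached (or no cgroup stamping): read the clock */
	return ktime_get_ns();
}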
Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-mq.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index f7428e11b109..8e41d88fcb8a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -725,7 +725,14 @@ void blk_mq_start_request(struct request *rq) trace_block_rq_issue(rq); if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { - rq->io_start_time_ns = ktime_get_ns(); + u64 start_time; +#ifdef CONFIG_BLK_CGROUP + if (rq->bio) + start_time = bio_issue_time(&rq->bio->bi_issue); + else +#endif + start_time = ktime_get_ns(); + rq->io_start_time_ns = start_time; rq->stats_sectors = blk_rq_sectors(rq); rq->rq_flags |= RQF_STATS; rq_qos_issue(q, rq); From ba0ffdd8ce48ad7f7e85191cd29f9674caca3745 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Oct 2021 12:01:07 -0600 Subject: [PATCH 040/174] block: bump max plugged deferred size from 16 to 32 Particularly for NVMe with efficient deferred submission for many requests, there are nice benefits to be seen by bumping the default max plug count from 16 to 32. This is especially true for virtualized setups, where the submit part is more expensive. But can be noticed even on native hardware. Reduce the multiple queue factor from 4 to 2, since we're changing the default size. While changing it, move the defines into the block layer private header. These aren't values that anyone outside of the block layer uses, or should use. Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 ++-- block/blk.h | 6 ++++++ include/linux/blkdev.h | 2 -- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 8e41d88fcb8a..d861a969b2e0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2152,14 +2152,14 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) } /* - * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple + * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple * queues. This is important for md arrays to benefit from merging * requests. */ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) { if (plug->multiple_queues) - return BLK_MAX_REQUEST_COUNT * 4; + return BLK_MAX_REQUEST_COUNT * 2; return BLK_MAX_REQUEST_COUNT; } diff --git a/block/blk.h b/block/blk.h index 7aaccbc26db4..019c50a140ba 100644 --- a/block/blk.h +++ b/block/blk.h @@ -224,6 +224,12 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, void blk_account_io_start(struct request *req); void blk_account_io_done(struct request *req, u64 now); +/* + * Plug flush limits + */ +#define BLK_MAX_REQUEST_COUNT 32 +#define BLK_PLUG_FLUSH_SIZE (128 * 1024) + /* * Internal elevator interface */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b19172db7eef..472b4ab007c6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -727,8 +727,6 @@ struct blk_plug { bool multiple_queues; bool nowait; }; -#define BLK_MAX_REQUEST_COUNT 16 -#define BLK_PLUG_FLUSH_SIZE (128 * 1024) struct blk_plug_cb; typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); From 47c122e35d7e43b14129ceb9ed3a7e67599978fa Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Oct 2021 06:34:11 -0600 Subject: [PATCH 041/174] block: pre-allocate requests if plug is started and is a batch The caller typically has a good (or even exact) idea of how many requests it needs to submit. 
We can make the request/tag allocation a lot more efficient if we
just allocate N requests/tags upfront when we queue the first bio
from the batch.

Provide a new plug start helper that allows the caller to specify
how many IOs are expected. This sets plug->nr_ios, and we can use
that for smarter request allocation. The plug provides a holding
spot for requests, and request allocation will check it before
calling into the normal request allocation path.

When blk_finish_plug() is called, check if there are unused requests
and free them. This should not happen in normal operations. The
exception is if we get merging; then we may be left with requests
that need freeing when done.

This raises the per-core performance on my setup from ~5.8M to ~6.1M
IOPS.

Signed-off-by: Jens Axboe
---
 block/blk-core.c       | 47 ++++++++++++++++------------
 block/blk-mq.c         | 70 ++++++++++++++++++++++++++++++++++--------
 block/blk-mq.h         |  5 +++
 include/linux/blk-mq.h |  5 ++-
 include/linux/blkdev.h | 15 ++++++++-
 5 files changed, 109 insertions(+), 33 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 3b5ee3f7cc1e..e25a1c3f8b76 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1632,6 +1632,31 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
 }
 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
 
+void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
+{
+	struct task_struct *tsk = current;
+
+	/*
+	 * If this is a nested plug, don't actually assign it.
+	 */
+	if (tsk->plug)
+		return;
+
+	INIT_LIST_HEAD(&plug->mq_list);
+	plug->cached_rq = NULL;
+	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
+	plug->rq_count = 0;
+	plug->multiple_queues = false;
+	plug->nowait = false;
+	INIT_LIST_HEAD(&plug->cb_list);
+
+	/*
+	 * Store ordering should not be needed here, since a potential
+	 * preempt will imply a full memory barrier
+	 */
+	tsk->plug = plug;
+}
+
 /**
  * blk_start_plug - initialize blk_plug and track it inside the task_struct
  * @plug:	The &struct blk_plug that needs to be initialized
@@ -1657,25 +1682,7 @@ EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
  */
 void blk_start_plug(struct blk_plug *plug)
 {
-	struct task_struct *tsk = current;
-
-	/*
-	 * If this is a nested plug, don't actually assign it.
-	 */
-	if (tsk->plug)
-		return;
-
-	INIT_LIST_HEAD(&plug->mq_list);
-	INIT_LIST_HEAD(&plug->cb_list);
-	plug->rq_count = 0;
-	plug->multiple_queues = false;
-	plug->nowait = false;
-
-	/*
-	 * Store ordering should not be needed here, since a potential
-	 * preempt will imply a full memory barrier
-	 */
-	tsk->plug = plug;
+	blk_start_plug_nr_ios(plug, 1);
 }
 EXPORT_SYMBOL(blk_start_plug);
 
@@ -1727,6 +1734,8 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	if (!list_empty(&plug->mq_list))
 		blk_mq_flush_plug_list(plug, from_schedule);
+	if (unlikely(!from_schedule && plug->cached_rq))
+		blk_mq_free_plug_rqs(plug);
 }
 
 /**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d861a969b2e0..d9f14d3c2b8c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -359,6 +359,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 	struct request_queue *q = data->q;
 	struct elevator_queue *e = q->elevator;
 	u64 alloc_time_ns = 0;
+	struct request *rq;
 	unsigned int tag;
 
 	/* alloc_time includes depth and tag waits */
@@ -392,10 +393,21 @@ retry:
 	 * case just retry the hctx assignment and tag allocation as CPU hotplug
 	 * should have migrated us to an online CPU by now.
*/ - tag = blk_mq_get_tag(data); - if (tag == BLK_MQ_NO_TAG) { + do { + tag = blk_mq_get_tag(data); + if (tag != BLK_MQ_NO_TAG) { + rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + if (!--data->nr_tags) + return rq; + if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) + return rq; + rq->rq_next = *data->cached_rq; + *data->cached_rq = rq; + data->flags |= BLK_MQ_REQ_NOWAIT; + continue; + } if (data->flags & BLK_MQ_REQ_NOWAIT) - return NULL; + break; /* * Give up the CPU and sleep for a random short time to ensure @@ -404,8 +416,15 @@ retry: */ msleep(3); goto retry; + } while (1); + + if (data->cached_rq) { + rq = *data->cached_rq; + *data->cached_rq = rq->rq_next; + return rq; } - return blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + + return NULL; } struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, @@ -415,6 +434,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, .q = q, .flags = flags, .cmd_flags = op, + .nr_tags = 1, }; struct request *rq; int ret; @@ -443,6 +463,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, .q = q, .flags = flags, .cmd_flags = op, + .nr_tags = 1, }; u64 alloc_time_ns = 0; unsigned int cpu; @@ -544,6 +565,18 @@ void blk_mq_free_request(struct request *rq) } EXPORT_SYMBOL_GPL(blk_mq_free_request); +void blk_mq_free_plug_rqs(struct blk_plug *plug) +{ + while (plug->cached_rq) { + struct request *rq; + + rq = plug->cached_rq; + plug->cached_rq = rq->rq_next; + percpu_ref_get(&rq->q->q_usage_counter); + blk_mq_free_request(rq); + } +} + inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { u64 now = 0; @@ -2185,6 +2218,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) const int is_flush_fua = op_is_flush(bio->bi_opf); struct blk_mq_alloc_data data = { .q = q, + .nr_tags = 1, }; struct request *rq; struct blk_plug *plug; @@ -2211,13 +2245,26 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) hipri = bio->bi_opf & REQ_HIPRI; - data.cmd_flags = bio->bi_opf; - rq = __blk_mq_alloc_request(&data); - if (unlikely(!rq)) { - rq_qos_cleanup(q, bio); - if (bio->bi_opf & REQ_NOWAIT) - bio_wouldblock_error(bio); - goto queue_exit; + plug = blk_mq_plug(q, bio); + if (plug && plug->cached_rq) { + rq = plug->cached_rq; + plug->cached_rq = rq->rq_next; + INIT_LIST_HEAD(&rq->queuelist); + data.hctx = rq->mq_hctx; + } else { + data.cmd_flags = bio->bi_opf; + if (plug) { + data.nr_tags = plug->nr_ios; + plug->nr_ios = 1; + data.cached_rq = &plug->cached_rq; + } + rq = __blk_mq_alloc_request(&data); + if (unlikely(!rq)) { + rq_qos_cleanup(q, bio); + if (bio->bi_opf & REQ_NOWAIT) + bio_wouldblock_error(bio); + goto queue_exit; + } } trace_block_getrq(bio); @@ -2236,7 +2283,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) return BLK_QC_T_NONE; } - plug = blk_mq_plug(q, bio); if (unlikely(is_flush_fua)) { /* Bypass scheduler for flush requests */ blk_insert_flush(rq); diff --git a/block/blk-mq.h b/block/blk-mq.h index 171e8cdcff54..5da970bb8865 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -125,6 +125,7 @@ extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q); extern int blk_mq_sysfs_register(struct request_queue *q); extern void blk_mq_sysfs_unregister(struct request_queue *q); extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); +void blk_mq_free_plug_rqs(struct blk_plug *plug); void blk_mq_release(struct request_queue *q); @@ -152,6 +153,10 @@ struct blk_mq_alloc_data { unsigned int shallow_depth; unsigned int cmd_flags; + /* allocate 
multiple requests/tags in one go */ + unsigned int nr_tags; + struct request **cached_rq; + /* input & output parameter */ struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 75d75657df21..0e941f217578 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -90,7 +90,10 @@ struct request { struct bio *bio; struct bio *biotail; - struct list_head queuelist; + union { + struct list_head queuelist; + struct request *rq_next; + }; /* * The hash is used inside the scheduler, and killed once the diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 472b4ab007c6..17705c970d7e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -722,10 +722,17 @@ extern void blk_set_queue_dying(struct request_queue *); */ struct blk_plug { struct list_head mq_list; /* blk-mq requests */ - struct list_head cb_list; /* md requires an unplug callback */ + + /* if ios_left is > 1, we can batch tag/rq allocations */ + struct request *cached_rq; + unsigned short nr_ios; + unsigned short rq_count; + bool multiple_queues; bool nowait; + + struct list_head cb_list; /* md requires an unplug callback */ }; struct blk_plug_cb; @@ -738,6 +745,7 @@ struct blk_plug_cb { extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, int size); extern void blk_start_plug(struct blk_plug *); +extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short); extern void blk_finish_plug(struct blk_plug *); extern void blk_flush_plug_list(struct blk_plug *, bool); @@ -772,6 +780,11 @@ long nr_blockdev_pages(void); struct blk_plug { }; +static inline void blk_start_plug_nr_ios(struct blk_plug *plug, + unsigned short nr_ios) +{ +} + static inline void blk_start_plug(struct blk_plug *plug) { } From b90cfaed3789ecdc5580027fc91e3056bc6b3216 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:40:44 +0200 Subject: [PATCH 042/174] blk-mq: cleanup and rename __blk_mq_alloc_request The newly added loop for the cached requests in __blk_mq_alloc_request is a little too convoluted for my taste, so unwind it a bit. Also rename the function to __blk_mq_alloc_requests now that it can allocate more than a single request. 
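For context, a hedged sketch of the caller side that this batch
allocation serves: blk_start_plug_nr_ios() and blk_finish_plug() are
the real interfaces from the earlier patch, while the bios[] array and
the batch size of 8 are purely illustrative:

	struct blk_plug plug;
	int i;

	/* hint that ~8 IOs follow so requests/tags can be allocated in one go */
	blk_start_plug_nr_ios(&plug, 8);
	for (i = 0; i < 8; i++)
		submit_bio(bios[i]);
	/* any cached requests left unused are freed when the plug finishes */
	blk_finish_plug(&plug);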
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104045.658051-2-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 56 +++++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index d9f14d3c2b8c..98a5d0850b95 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -354,7 +354,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, return rq; } -static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) +static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) { struct request_queue *q = data->q; struct elevator_queue *e = q->elevator; @@ -395,36 +395,36 @@ retry: */ do { tag = blk_mq_get_tag(data); - if (tag != BLK_MQ_NO_TAG) { - rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); - if (!--data->nr_tags) - return rq; - if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) - return rq; - rq->rq_next = *data->cached_rq; - *data->cached_rq = rq; - data->flags |= BLK_MQ_REQ_NOWAIT; - continue; + if (tag == BLK_MQ_NO_TAG) { + if (data->flags & BLK_MQ_REQ_NOWAIT) + break; + /* + * Give up the CPU and sleep for a random short time to + * ensure that thread using a realtime scheduling class + * are migrated off the CPU, and thus off the hctx that + * is going away. + */ + msleep(3); + goto retry; } - if (data->flags & BLK_MQ_REQ_NOWAIT) - break; - /* - * Give up the CPU and sleep for a random short time to ensure - * that thread using a realtime scheduling class are migrated - * off the CPU, and thus off the hctx that is going away. - */ - msleep(3); - goto retry; + rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + if (!--data->nr_tags || e || + (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) + return rq; + + /* link into the cached list */ + rq->rq_next = *data->cached_rq; + *data->cached_rq = rq; + data->flags |= BLK_MQ_REQ_NOWAIT; } while (1); - if (data->cached_rq) { - rq = *data->cached_rq; - *data->cached_rq = rq->rq_next; - return rq; - } + if (!data->cached_rq) + return NULL; - return NULL; + rq = *data->cached_rq; + *data->cached_rq = rq->rq_next; + return rq; } struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, @@ -443,7 +443,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, if (ret) return ERR_PTR(ret); - rq = __blk_mq_alloc_request(&data); + rq = __blk_mq_alloc_requests(&data); if (!rq) goto out_queue_exit; rq->__data_len = 0; @@ -2258,7 +2258,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) plug->nr_ios = 1; data.cached_rq = &plug->cached_rq; } - rq = __blk_mq_alloc_request(&data); + rq = __blk_mq_alloc_requests(&data); if (unlikely(!rq)) { rq_qos_cleanup(q, bio); if (bio->bi_opf & REQ_NOWAIT) From 0f38d76646157357fcfa02f50575ea044830c494 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:40:45 +0200 Subject: [PATCH 043/174] blk-mq: cleanup blk_mq_submit_bio Move the blk_mq_alloc_data stack allocation only into the branch that actually needs it, and use rq->mq_hctx instead of data.hctx to refer to the hctx. 
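Abridged, the resulting shape of blk_mq_submit_bio() is roughly the
following; error handling and the plug accounting are omitted, so
treat this as a sketch of the real code in the diff below:

	plug = blk_mq_plug(q, bio);
	if (plug && plug->cached_rq) {
		/* fast path: pop a pre-allocated request off the plug */
		rq = plug->cached_rq;
		plug->cached_rq = rq->rq_next;
	} else {
		/* slow path: the alloc data now only lives in this branch */
		struct blk_mq_alloc_data data = {
			.q		= q,
			.nr_tags	= 1,
			.cmd_flags	= bio->bi_opf,
		};

		rq = __blk_mq_alloc_requests(&data);
	}

	cookie = request_to_qc_t(rq->mq_hctx, rq);	/* hctx via the request */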
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104045.658051-3-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 98a5d0850b95..87dc2debedfb 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2216,10 +2216,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) struct request_queue *q = bio->bi_bdev->bd_disk->queue; const int is_sync = op_is_sync(bio->bi_opf); const int is_flush_fua = op_is_flush(bio->bi_opf); - struct blk_mq_alloc_data data = { - .q = q, - .nr_tags = 1, - }; struct request *rq; struct blk_plug *plug; struct request *same_queue_rq = NULL; @@ -2250,9 +2246,13 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq = plug->cached_rq; plug->cached_rq = rq->rq_next; INIT_LIST_HEAD(&rq->queuelist); - data.hctx = rq->mq_hctx; } else { - data.cmd_flags = bio->bi_opf; + struct blk_mq_alloc_data data = { + .q = q, + .nr_tags = 1, + .cmd_flags = bio->bi_opf, + }; + if (plug) { data.nr_tags = plug->nr_ios; plug->nr_ios = 1; @@ -2271,7 +2271,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq_qos_track(q, rq, bio); - cookie = request_to_qc_t(data.hctx, rq); + cookie = request_to_qc_t(rq->mq_hctx, rq); blk_mq_bio_to_request(rq, bio, nr_segs); @@ -2286,7 +2286,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) if (unlikely(is_flush_fua)) { /* Bypass scheduler for flush requests */ blk_insert_flush(rq); - blk_mq_run_hw_queue(data.hctx, true); + blk_mq_run_hw_queue(rq->mq_hctx, true); } else if (plug && (q->nr_hw_queues == 1 || blk_mq_is_shared_tags(rq->mq_hctx->flags) || q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { @@ -2333,18 +2333,17 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) trace_block_plug(q); if (same_queue_rq) { - data.hctx = same_queue_rq->mq_hctx; trace_block_unplug(q, 1, true); - blk_mq_try_issue_directly(data.hctx, same_queue_rq, - &cookie); + blk_mq_try_issue_directly(same_queue_rq->mq_hctx, + same_queue_rq, &cookie); } } else if ((q->nr_hw_queues > 1 && is_sync) || - !data.hctx->dispatch_busy) { + !rq->mq_hctx->dispatch_busy) { /* * There is no scheduler and we can try to send directly * to the hardware. */ - blk_mq_try_issue_directly(data.hctx, rq, &cookie); + blk_mq_try_issue_directly(rq->mq_hctx, rq, &cookie); } else { /* Default case. */ blk_mq_sched_insert_request(rq, false, true, true); From 4a60f360a5c9533d77db011db6766448f763c86a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 16 Oct 2021 07:34:49 -0600 Subject: [PATCH 044/174] block: don't dereference request after flush insertion We could have a race here, where the request gets freed before we call into blk_mq_run_hw_queue(). If this happens, we cannot rely on the state of the request. Grab the hardware context before inserting the flush. 
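Reduced to its essence, the hazard and the fix look like this
(abridged from the diff below):

	/* before: rq may already be freed once the flush is inserted */
	blk_insert_flush(rq);
	blk_mq_run_hw_queue(rq->mq_hctx, true);	/* potential use-after-free */

	/* after: take the hctx pointer while rq is still known to be valid */
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	blk_insert_flush(rq);
	blk_mq_run_hw_queue(hctx, true);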
Fixes: 0f38d7664615 ("blk-mq: cleanup blk_mq_submit_bio") Reviewed-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 87dc2debedfb..b7b8437f0a04 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2284,9 +2284,10 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) } if (unlikely(is_flush_fua)) { + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; /* Bypass scheduler for flush requests */ blk_insert_flush(rq); - blk_mq_run_hw_queue(rq->mq_hctx, true); + blk_mq_run_hw_queue(hctx, true); } else if (plug && (q->nr_hw_queues == 1 || blk_mq_is_shared_tags(rq->mq_hctx->flags) || q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { From fea349b037869beda6db5ed0c150f8a2a9a317f6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:44:48 +0200 Subject: [PATCH 045/174] block: unexport blkdev_ioctl With the raw driver gone, there is no modular user left. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104450.659013-2-hch@lst.de Signed-off-by: Jens Axboe --- block/ioctl.c | 1 - 1 file changed, 1 deletion(-) diff --git a/block/ioctl.c b/block/ioctl.c index eb0491e90b9a..0f823444cc55 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -588,7 +588,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return -ENOTTY; return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); } -EXPORT_SYMBOL_GPL(blkdev_ioctl); /* for /dev/raw */ #ifdef CONFIG_COMPAT From 84b8514b46b417e299746b1297d3d0899e8539f3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:44:49 +0200 Subject: [PATCH 046/174] block: move the *blkdev_ioctl declarations out of blkdev.h These are only used inside of block/. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104450.659013-3-hch@lst.de Signed-off-by: Jens Axboe --- block/blk.h | 4 ++++ include/linux/genhd.h | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/block/blk.h b/block/blk.h index 019c50a140ba..eadb2a640a69 100644 --- a/block/blk.h +++ b/block/blk.h @@ -405,6 +405,10 @@ static inline void bio_clear_hipri(struct bio *bio) bio->bi_opf &= ~REQ_HIPRI; } +int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + unsigned long arg); +long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); + extern const struct address_space_operations def_blk_aops; #endif /* BLK_INTERNAL_H */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 0b48a0cf4262..cd4038fd5743 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -281,10 +281,6 @@ bool bdev_check_media_change(struct block_device *bdev); int __invalidate_device(struct block_device *bdev, bool kill_dirty); void set_capacity(struct gendisk *disk, sector_t size); -/* for drivers/char/raw.c: */ -int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); -long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); - #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); From 8a709512eae7ccd5ba73e8634474caadbc9ebe76 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:44:50 +0200 Subject: [PATCH 047/174] block: merge block_ioctl into blkdev_ioctl Simplify the ioctl path and match the code structure on the compat side. 
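After the merge, the block device file_operations point at the handler
directly; abridged from the fops.c hunk below, with unrelated members
elided:

const struct file_operations def_blk_fops = {
	/* ... */
	.unlocked_ioctl	= blkdev_ioctl,	/* formerly the block_ioctl() wrapper */
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	/* ... */
};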
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104450.659013-4-hch@lst.de Signed-off-by: Jens Axboe --- block/blk.h | 3 +-- block/fops.c | 19 +------------------ block/ioctl.c | 18 ++++++++++++++---- 3 files changed, 16 insertions(+), 24 deletions(-) diff --git a/block/blk.h b/block/blk.h index eadb2a640a69..618482b29671 100644 --- a/block/blk.h +++ b/block/blk.h @@ -405,8 +405,7 @@ static inline void bio_clear_hipri(struct bio *bio) bio->bi_opf &= ~REQ_HIPRI; } -int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, - unsigned long arg); +long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); extern const struct address_space_operations def_blk_aops; diff --git a/block/fops.c b/block/fops.c index 1e970c247e0e..7bb9581a146c 100644 --- a/block/fops.c +++ b/block/fops.c @@ -461,23 +461,6 @@ static int blkdev_close(struct inode *inode, struct file *filp) return 0; } -static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) -{ - struct block_device *bdev = I_BDEV(bdev_file_inode(file)); - fmode_t mode = file->f_mode; - - /* - * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have - * to updated it before every ioctl. - */ - if (file->f_flags & O_NDELAY) - mode |= FMODE_NDELAY; - else - mode &= ~FMODE_NDELAY; - - return blkdev_ioctl(bdev, mode, cmd, arg); -} - /* * Write data to the block device. Only intended for the block device itself * and the raw driver which basically is a fake block device. @@ -621,7 +604,7 @@ const struct file_operations def_blk_fops = { .iopoll = blkdev_iopoll, .mmap = generic_file_mmap, .fsync = blkdev_fsync, - .unlocked_ioctl = block_ioctl, + .unlocked_ioctl = blkdev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_blkdev_ioctl, #endif diff --git a/block/ioctl.c b/block/ioctl.c index 0f823444cc55..77b1b2453f39 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -538,12 +538,22 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode, * * New commands must be compatible and go into blkdev_common_ioctl */ -int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, - unsigned long arg) +long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) { - int ret; - loff_t size; + struct block_device *bdev = I_BDEV(file->f_mapping->host); void __user *argp = (void __user *)arg; + fmode_t mode = file->f_mode; + loff_t size; + int ret; + + /* + * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have + * to updated it before every ioctl. + */ + if (file->f_flags & O_NDELAY) + mode |= FMODE_NDELAY; + else + mode &= ~FMODE_NDELAY; switch (cmd) { /* These need separate implementations for the data structure */ From be6bfe36db1795babe9d92178a47b2e02193cb0f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 9 Oct 2021 13:25:41 +0100 Subject: [PATCH 048/174] block: inline hot paths of blk_account_io_*() Extract hot paths of __blk_account_io_start() and __blk_account_io_done() into inline functions, so we don't always pay for function calls. 
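The shape of the change, in brief: the cheap predicate stays inline at
every call site, and only requests that actually have stats enabled pay
for a call into the out-of-line double-underscore helper (abridged from
the blk.h hunk below):

static inline void blk_account_io_start(struct request *req)
{
	/* common case: stats disabled, no function call is made */
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}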
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b0662a636bd4cc7b4f84c9d0a41efa46a688ef13.1633781740.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-core.c | 30 +++++++++--------------------- block/blk.h | 24 +++++++++++++++++++++--- 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index e25a1c3f8b76..80fa7aa394c7 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1180,8 +1180,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * if (blk_crypto_insert_cloned_request(rq)) return BLK_STS_IOERR; - if (blk_queue_io_stat(q)) - blk_account_io_start(rq); + blk_account_io_start(rq); /* * Since we have a scheduler attached on the top device, @@ -1260,30 +1259,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes) } } -void blk_account_io_done(struct request *req, u64 now) +void __blk_account_io_done(struct request *req, u64 now) { - /* - * Account IO completion. flush_rq isn't accounted as a - * normal IO on queueing nor completion. Accounting the - * containing request is enough. - */ - if (req->part && blk_do_io_stat(req) && - !(req->rq_flags & RQF_FLUSH_SEQ)) { - const int sgrp = op_stat_group(req_op(req)); + const int sgrp = op_stat_group(req_op(req)); - part_stat_lock(); - update_io_ticks(req->part, jiffies, true); - part_stat_inc(req->part, ios[sgrp]); - part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); - part_stat_unlock(); - } + part_stat_lock(); + update_io_ticks(req->part, jiffies, true); + part_stat_inc(req->part, ios[sgrp]); + part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); + part_stat_unlock(); } -void blk_account_io_start(struct request *rq) +void __blk_account_io_start(struct request *rq) { - if (!blk_do_io_stat(rq)) - return; - /* passthrough requests can hold bios that do not have ->bi_bdev set */ if (rq->bio && rq->bio->bi_bdev) rq->part = rq->bio->bi_bdev; diff --git a/block/blk.h b/block/blk.h index 618482b29671..cab8d659d8a6 100644 --- a/block/blk.h +++ b/block/blk.h @@ -221,8 +221,8 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); -void blk_account_io_start(struct request *req); -void blk_account_io_done(struct request *req, u64 now); +void __blk_account_io_start(struct request *req); +void __blk_account_io_done(struct request *req, u64 now); /* * Plug flush limits @@ -286,7 +286,25 @@ int blk_dev_init(void); */ static inline bool blk_do_io_stat(struct request *rq) { - return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT); + return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk; +} + +static inline void blk_account_io_done(struct request *req, u64 now) +{ + /* + * Account IO completion. flush_rq isn't accounted as a + * normal IO on queueing nor completion. Accounting the + * containing request is enough. 
+ */ + if (blk_do_io_stat(req) && req->part && + !(req->rq_flags & RQF_FLUSH_SEQ)) + __blk_account_io_done(req, now); +} + +static inline void blk_account_io_start(struct request *req) +{ + if (blk_do_io_stat(req)) + __blk_account_io_start(req); } static inline void req_set_nomerge(struct request_queue *q, struct request *req) From e9ea15963f3b9d979e1d6d758b8e407775ae6588 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 9 Oct 2021 13:25:42 +0100 Subject: [PATCH 049/174] blk-mq: inline hot part of __blk_mq_sched_restart Extract a fast check out of __block_mq_sched_restart() and inline it for performance reasons. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/894abaa0998e5999f2fe18f271e5efdfc2c32bd2.1633781740.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 4 +--- block/blk-mq-sched.h | 8 +++++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 27312da7d638..efc1374b8831 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -57,10 +57,8 @@ void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) } EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx); -void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) +void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) { - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) - return; clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); /* diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index e4d367e0b2a3..c97b816c3800 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -17,7 +17,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, struct list_head *free); void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); -void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); +void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue, bool async); @@ -31,6 +31,12 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); void blk_mq_sched_free_rqs(struct request_queue *q); +static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) +{ + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + __blk_mq_sched_restart(hctx); +} + static inline bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) From 9e8c0d0d4d21d2c2c60132e2c18a73d08a230b42 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:17:57 +0200 Subject: [PATCH 050/174] block: remove BIO_BUG_ON BIO_DEBUG is always defined, so just switch the two instances to use BUG_ON directly. 
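The macro being deleted was the usual compile-out assertion idiom; once BIO_DEBUG is unconditionally defined, the #else arm is dead code and the indirection buys nothing. A minimal userspace sketch of what the pattern reduces to (assert() standing in for BUG_ON, which fires when its condition is true):

#include <assert.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON(cond)	assert(!(cond))
#else
#define BIO_BUG_ON(cond)	do { } while (0)
#endif

int main(void)
{
	unsigned short nr_vecs = 16;

	/* with BIO_DEBUG always defined, this is exactly the assert */
	BIO_BUG_ON(nr_vecs > 256);
	return 0;
}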
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-2-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 4 ++-- include/linux/bio.h | 8 -------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/block/bio.c b/block/bio.c index a6fb6a0b4295..35b875563c8b 100644 --- a/block/bio.c +++ b/block/bio.c @@ -156,7 +156,7 @@ out: void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs) { - BIO_BUG_ON(nr_vecs > BIO_MAX_VECS); + BUG_ON(nr_vecs > BIO_MAX_VECS); if (nr_vecs == BIO_MAX_VECS) mempool_free(bv, pool); @@ -677,7 +677,7 @@ static void bio_alloc_cache_destroy(struct bio_set *bs) void bio_put(struct bio *bio) { if (unlikely(bio_flagged(bio, BIO_REFFED))) { - BIO_BUG_ON(!atomic_read(&bio->__bi_cnt)); + BUG_ON(!atomic_read(&bio->__bi_cnt)); if (!atomic_dec_and_test(&bio->__bi_cnt)) return; } diff --git a/include/linux/bio.h b/include/linux/bio.h index 00952e92eae1..65a356fa7110 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -11,14 +11,6 @@ #include #include -#define BIO_DEBUG - -#ifdef BIO_DEBUG -#define BIO_BUG_ON BUG_ON -#else -#define BIO_BUG_ON -#endif - #define BIO_MAX_VECS 256U static inline unsigned int bio_max_segs(unsigned int nr_segs) From 11d9cab1ca6ed08747c6c5a274c6a8e8307aefbc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:17:58 +0200 Subject: [PATCH 051/174] block: don't include in bio.h doesn't need any of the definitions from ioprio.h. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-3-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/bio.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/bio.h b/include/linux/bio.h index 65a356fa7110..ae94794d07c9 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -6,7 +6,6 @@ #define __LINUX_BIO_H #include -#include /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include #include From 8addffd657a956944c1b8a74a1c9aabcfc5b4530 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:17:59 +0200 Subject: [PATCH 052/174] block: move bio_mergeable out of bio.h bio_mergeable is only needed by I/O schedulers, so move it to blk-mq-sched.h. 
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-4-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-sched.h | 5 +++++ include/linux/bio.h | 8 -------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index c97b816c3800..fe252278ed9a 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -37,6 +37,11 @@ static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) __blk_mq_sched_restart(hctx); } +static inline bool bio_mergeable(struct bio *bio) +{ + return !(bio->bi_opf & REQ_NOMERGE_FLAGS); +} + static inline bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) diff --git a/include/linux/bio.h b/include/linux/bio.h index ae94794d07c9..ec255f282994 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -69,14 +69,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio) bio_op(bio) == REQ_OP_WRITE_ZEROES; } -static inline bool bio_mergeable(struct bio *bio) -{ - if (bio->bi_opf & REQ_NOMERGE_FLAGS) - return false; - - return true; -} - static inline unsigned int bio_cur_bytes(struct bio *bio) { if (bio_has_data(bio)) From b6559d8f9fdd7f0e139161cffea2645bd8d084c6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:00 +0200 Subject: [PATCH 053/174] block: fold bio_cur_bytes into blk_rq_cur_bytes Fold bio_cur_bytes into the only caller. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-5-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/bio.h | 8 -------- include/linux/blk-mq.h | 6 +++++- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/include/linux/bio.h b/include/linux/bio.h index ec255f282994..cffd5eba401e 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -69,14 +69,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio) bio_op(bio) == REQ_OP_WRITE_ZEROES; } -static inline unsigned int bio_cur_bytes(struct bio *bio) -{ - if (bio_has_data(bio)) - return bio_iovec(bio).bv_len; - else /* dataless requests such as discard */ - return bio->bi_iter.bi_size; -} - static inline void *bio_data(struct bio *bio) { if (bio_has_data(bio)) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 0e941f217578..2219e9277118 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -927,7 +927,11 @@ static inline unsigned int blk_rq_bytes(const struct request *rq) static inline int blk_rq_cur_bytes(const struct request *rq) { - return rq->bio ? bio_cur_bytes(rq->bio) : 0; + if (!rq->bio) + return 0; + if (!bio_has_data(rq->bio)) /* dataless requests such as discard */ + return rq->bio->bi_iter.bi_size; + return bio_iovec(rq->bio).bv_len; } unsigned int blk_rq_err_bytes(const struct request *rq); From 9a6083becbe113ed1e28059ce659dc8ae71b33c3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:01 +0200 Subject: [PATCH 054/174] block: move bio_full out of bio.h bio_full is only used in bio.c, so move it there. 
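One detail worth keeping in mind when reading bio_full: the size check is written as bi_size > UINT_MAX - len rather than bi_size + len > UINT_MAX, because the latter addition can wrap and make the comparison lie. A standalone demonstration of the idiom (nothing here is kernel-specific):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* overflow-safe form of "would size + len exceed UINT_MAX?" */
static bool would_overflow(unsigned int size, unsigned int len)
{
	return size > UINT_MAX - len;
}

int main(void)
{
	printf("%d\n", would_overflow(UINT_MAX - 4, 8));	/* 1: would wrap */
	printf("%d\n", would_overflow(100, 8));			/* 0: fits */
	return 0;
}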
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-6-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 17 +++++++++++++++++ include/linux/bio.h | 19 ------------------- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/block/bio.c b/block/bio.c index 35b875563c8b..7e2899071de2 100644 --- a/block/bio.c +++ b/block/bio.c @@ -772,6 +772,23 @@ const char *bio_devname(struct bio *bio, char *buf) } EXPORT_SYMBOL(bio_devname); +/** + * bio_full - check if the bio is full + * @bio: bio to check + * @len: length of one segment to be added + * + * Return true if @bio is full and one segment with @len bytes can't be + * added to the bio, otherwise return false + */ +static inline bool bio_full(struct bio *bio, unsigned len) +{ + if (bio->bi_vcnt >= bio->bi_max_vecs) + return true; + if (bio->bi_iter.bi_size > UINT_MAX - len) + return true; + return false; +} + static inline bool page_is_mergeable(const struct bio_vec *bv, struct page *page, unsigned int len, unsigned int off, bool *same_page) diff --git a/include/linux/bio.h b/include/linux/bio.h index cffd5eba401e..2ffc7c768091 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -77,25 +77,6 @@ static inline void *bio_data(struct bio *bio) return NULL; } -/** - * bio_full - check if the bio is full - * @bio: bio to check - * @len: length of one segment to be added - * - * Return true if @bio is full and one segment with @len bytes can't be - * added to the bio, otherwise return false - */ -static inline bool bio_full(struct bio *bio, unsigned len) -{ - if (bio->bi_vcnt >= bio->bi_max_vecs) - return true; - - if (bio->bi_iter.bi_size > UINT_MAX - len) - return true; - - return false; -} - static inline bool bio_next_segment(const struct bio *bio, struct bvec_iter_all *iter) { From 9774b39175fe3606ce2a836a47134c53b75681c9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:02 +0200 Subject: [PATCH 055/174] block: mark __bio_try_merge_page static Mark __bio_try_merge_page static and move it up a bit to avoid the need for a forward declaration. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-7-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 77 ++++++++++++++++++++++----------------------- include/linux/bio.h | 2 -- 2 files changed, 38 insertions(+), 41 deletions(-) diff --git a/block/bio.c b/block/bio.c index 7e2899071de2..c89e402356a5 100644 --- a/block/bio.c +++ b/block/bio.c @@ -808,6 +808,44 @@ static inline bool page_is_mergeable(const struct bio_vec *bv, return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE); } +/** + * __bio_try_merge_page - try appending data to an existing bvec. + * @bio: destination bio + * @page: start page to add + * @len: length of the data to add + * @off: offset of the data relative to @page + * @same_page: return if the segment has been merged inside the same page + * + * Try to add the data at @page + @off to the last bvec of @bio. This is a + * useful optimisation for file systems with a block size smaller than the + * page size. + * + * Warn if (@len, @off) crosses pages in case that @same_page is true. + * + * Return %true on success or %false on failure. 
+ */ +static bool __bio_try_merge_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int off, bool *same_page) +{ + if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) + return false; + + if (bio->bi_vcnt > 0) { + struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; + + if (page_is_mergeable(bv, page, len, off, same_page)) { + if (bio->bi_iter.bi_size > UINT_MAX - len) { + *same_page = false; + return false; + } + bv->bv_len += len; + bio->bi_iter.bi_size += len; + return true; + } + } + return false; +} + /* * Try to merge a page into a segment, while obeying the hardware segment * size limit. This is not for normal read/write bios, but for passthrough @@ -939,45 +977,6 @@ int bio_add_zone_append_page(struct bio *bio, struct page *page, } EXPORT_SYMBOL_GPL(bio_add_zone_append_page); -/** - * __bio_try_merge_page - try appending data to an existing bvec. - * @bio: destination bio - * @page: start page to add - * @len: length of the data to add - * @off: offset of the data relative to @page - * @same_page: return if the segment has been merged inside the same page - * - * Try to add the data at @page + @off to the last bvec of @bio. This is a - * useful optimisation for file systems with a block size smaller than the - * page size. - * - * Warn if (@len, @off) crosses pages in case that @same_page is true. - * - * Return %true on success or %false on failure. - */ -bool __bio_try_merge_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int off, bool *same_page) -{ - if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) - return false; - - if (bio->bi_vcnt > 0) { - struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; - - if (page_is_mergeable(bv, page, len, off, same_page)) { - if (bio->bi_iter.bi_size > UINT_MAX - len) { - *same_page = false; - return false; - } - bv->bv_len += len; - bio->bi_iter.bi_size += len; - return true; - } - } - return false; -} -EXPORT_SYMBOL_GPL(__bio_try_merge_page); - /** * __bio_add_page - add page(s) to a bio in a new segment * @bio: destination bio diff --git a/include/linux/bio.h b/include/linux/bio.h index 2ffc7c768091..faa0a2fabaf0 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -425,8 +425,6 @@ extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); int bio_add_zone_append_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset); -bool __bio_try_merge_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int off, bool *same_page); void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); From ff18d77b5f0cf3e25aa13741eed4d788a13c04d7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:03 +0200 Subject: [PATCH 056/174] block: move bio_get_{first,last}_bvec out of bio.h bio_get_first_bvec and bio_get_last_bvec are only used in blk-merge.c, so move them there. 
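The subtle case bio_get_last_bvec handles is a bio that ends partway through its final io vector; after advancing the iterator, bi_bvec_done holds how many bytes of that vector were actually consumed, which is why the length is overridden at the end. A miniature userspace model of that bookkeeping, with illustrative sizes only:

#include <stdio.h>

struct vec { unsigned int len; };

int main(void)
{
	/* a "bio" over two 4K segments, ending 2048 bytes into the second */
	struct vec vecs[2] = { { .len = 4096 }, { .len = 4096 } };
	unsigned int io_size = 6144;
	unsigned int done = 0, idx = 0;

	while (idx + 1 < sizeof(vecs) / sizeof(vecs[0]) &&
	       done + vecs[idx].len <= io_size)
		done += vecs[idx++].len;

	/* io_size - done is what bvec_iter tracks as bi_bvec_done */
	printf("last vec %u, used %u of %u bytes\n",
	       idx, io_size - done, vecs[idx].len);
	return 0;
}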
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-8-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-merge.c | 31 +++++++++++++++++++++++++++++++ include/linux/bio.h | 31 ------------------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index bf7aedaad8f8..14ce19607cd8 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -15,6 +15,37 @@ #include "blk-rq-qos.h" #include "blk-throttle.h" +static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) +{ + *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); +} + +static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) +{ + struct bvec_iter iter = bio->bi_iter; + int idx; + + bio_get_first_bvec(bio, bv); + if (bv->bv_len == bio->bi_iter.bi_size) + return; /* this bio only has a single bvec */ + + bio_advance_iter(bio, &iter, iter.bi_size); + + if (!iter.bi_bvec_done) + idx = iter.bi_idx - 1; + else /* in the middle of bvec */ + idx = iter.bi_idx; + + *bv = bio->bi_io_vec[idx]; + + /* + * iter.bi_bvec_done records actual length of the last bvec + * if this bio ends in the middle of one io vector + */ + if (iter.bi_bvec_done) + bv->bv_len = iter.bi_bvec_done; +} + static inline bool bio_will_gap(struct request_queue *q, struct request *prev_rq, struct bio *prev, struct bio *next) { diff --git a/include/linux/bio.h b/include/linux/bio.h index faa0a2fabaf0..4c5106f2ed57 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -221,37 +221,6 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) bio->bi_flags &= ~(1U << bit); } -static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) -{ - *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); -} - -static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) -{ - struct bvec_iter iter = bio->bi_iter; - int idx; - - bio_get_first_bvec(bio, bv); - if (bv->bv_len == bio->bi_iter.bi_size) - return; /* this bio only has a single bvec */ - - bio_advance_iter(bio, &iter, iter.bi_size); - - if (!iter.bi_bvec_done) - idx = iter.bi_idx - 1; - else /* in the middle of bvec */ - idx = iter.bi_idx; - - *bv = bio->bi_io_vec[idx]; - - /* - * iter.bi_bvec_done records actual length of the last bvec - * if this bio ends in the middle of one io vector - */ - if (iter.bi_bvec_done) - bv->bv_len = iter.bi_bvec_done; -} - static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); From 4f7ab09a1ca0bc955635705c359ab3021b405fd0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:04 +0200 Subject: [PATCH 057/174] block: mark bio_truncate static bio_truncate is only used in bio.c, so mark it static. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-9-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 2 +- include/linux/bio.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/block/bio.c b/block/bio.c index c89e402356a5..d5120451c36a 100644 --- a/block/bio.c +++ b/block/bio.c @@ -546,7 +546,7 @@ EXPORT_SYMBOL(zero_fill_bio); * REQ_OP_READ, zero the truncated part. This function should only * be used for handling corner cases, such as bio eod. 
*/ -void bio_truncate(struct bio *bio, unsigned new_size) +static void bio_truncate(struct bio *bio, unsigned new_size) { struct bio_vec bv; struct bvec_iter iter; diff --git a/include/linux/bio.h b/include/linux/bio.h index 4c5106f2ed57..d8d27742a75f 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -405,7 +405,6 @@ extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); extern void bio_free_pages(struct bio *bio); -void bio_truncate(struct bio *bio, unsigned new_size); void guard_bio_eod(struct bio *bio); void zero_fill_bio(struct bio *bio); From 8971a3b7f1bf2b3096e558a0362a469dee77f426 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 13 Oct 2021 09:57:13 +0100 Subject: [PATCH 058/174] blk-mq: optimise *end_request non-stat path We already have a blk_mq_need_time_stamp() check in __blk_mq_end_request() to get a timestamp, hide all the statistics accounting under it. It cuts some cycles for requests that don't need stats, and is free otherwise. Signed-off-by: Pavel Begunkov Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/e0f2ea812e93a8adcd07101212e7d7e70ca304e7.1634115360.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index b7b8437f0a04..568773de9a7f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -579,20 +579,18 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug) inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { - u64 now = 0; + if (blk_mq_need_time_stamp(rq)) { + u64 now = ktime_get_ns(); - if (blk_mq_need_time_stamp(rq)) - now = ktime_get_ns(); + if (rq->rq_flags & RQF_STATS) { + blk_mq_poll_stats_start(rq->q); + blk_stat_add(rq, now); + } - if (rq->rq_flags & RQF_STATS) { - blk_mq_poll_stats_start(rq->q); - blk_stat_add(rq, now); + blk_mq_sched_completed_request(rq, now); + blk_account_io_done(rq, now); } - blk_mq_sched_completed_request(rq, now); - - blk_account_io_done(rq, now); - if (rq->end_io) { rq_qos_done(rq->q, rq); rq->end_io(rq, error); From 9672b0d43782047b1825a96bafee1b6aefa35bc2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 9 Oct 2021 13:02:23 -0600 Subject: [PATCH 059/174] sbitmap: add __sbitmap_queue_get_batch() The block layer tag allocation batching still calls into sbitmap to get each tag, but we can improve on that. Add __sbitmap_queue_get_batch(), which returns a mask of tags all at once, along with an offset for those tags. An example return would be 0xff, where bits 0..7 are set, with tag_offset == 128. The valid tags in this case would be 128..135. A batch is specific to an individual sbitmap_map, hence it cannot be larger than that. The requested number of tags is automatically reduced to the max that can be satisfied with a single map. On failure, 0 is returned. 
Caller should fall back to single tag allocation at that point. Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 13 +++++++++++ lib/sbitmap.c | 51 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 2713e689ad66..e30b56023ead 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -426,6 +426,19 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth); */ int __sbitmap_queue_get(struct sbitmap_queue *sbq); +/** + * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits + * @sbq: Bitmap queue to allocate from. + * @nr_tags: number of tags requested + * @offset: offset to add to returned bits + * + * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is + * a bit in the mask returned, and the caller must add @offset to the value to + * get the absolute tag value. + */ +unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, + unsigned int *offset); + /** * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct * sbitmap_queue, limiting the depth used from each word, with preemption diff --git a/lib/sbitmap.c b/lib/sbitmap.c index b25db9be938a..f398e0ae548e 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -489,6 +489,57 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq) } EXPORT_SYMBOL_GPL(__sbitmap_queue_get); +unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, + unsigned int *offset) +{ + struct sbitmap *sb = &sbq->sb; + unsigned int hint, depth; + unsigned long index, nr; + int i; + + if (unlikely(sb->round_robin)) + return 0; + + depth = READ_ONCE(sb->depth); + hint = update_alloc_hint_before_get(sb, depth); + + index = SB_NR_TO_INDEX(sb, hint); + + for (i = 0; i < sb->map_nr; i++) { + struct sbitmap_word *map = &sb->map[index]; + unsigned long get_mask; + + sbitmap_deferred_clear(map); + if (map->word == (1UL << (map->depth - 1)) - 1) + continue; + + nr = find_first_zero_bit(&map->word, map->depth); + if (nr + nr_tags <= map->depth) { + atomic_long_t *ptr = (atomic_long_t *) &map->word; + int map_tags = min_t(int, nr_tags, map->depth); + unsigned long val, ret; + + get_mask = ((1UL << map_tags) - 1) << nr; + do { + val = READ_ONCE(map->word); + ret = atomic_long_cmpxchg(ptr, val, get_mask | val); + } while (ret != val); + get_mask = (get_mask & ~ret) >> nr; + if (get_mask) { + *offset = nr + (index << sb->shift); + update_alloc_hint_after_get(sb, depth, hint, + *offset + map_tags - 1); + return get_mask; + } + } + /* Jump to next index. */ + if (++index >= sb->map_nr) + index = 0; + } + + return 0; +} + int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, unsigned int shallow_depth) { From 349302da83529539040d2516de1deec1e09f491c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 9 Oct 2021 13:10:39 -0600 Subject: [PATCH 060/174] block: improve batched tag allocation Add a blk_mq_get_tags() helper, which uses the new sbitmap API for allocating a batch of tags all at once. This both simplifies the block code for batched allocation, and it is also more efficient than just doing repeated calls into __sbitmap_queue_get(). This reduces the sbitmap overhead in peak runs from ~3% to ~1% and yields a performance increase from 6.6M IOPS to 6.8M IOPS for a single CPU core.
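To make the convention concrete on the consumer side: each set bit in the returned mask is one granted tag, and the absolute value is the offset plus the bit number, so the loop below recovers tags 128..135 from the example in the previous patch description. A standalone sketch of decoding a batch:

#include <stdio.h>

int main(void)
{
	unsigned long tags = 0xffUL;	/* mask as returned by the batch call */
	unsigned int tag_offset = 128;	/* absolute tag value of bit 0 */
	int i;

	for (i = 0; tags; i++) {
		if (!(tags & (1UL << i)))
			continue;
		tags &= ~(1UL << i);
		printf("allocated tag %u\n", tag_offset + i);
	}
	return 0;
}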
Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 15 ++++++++ block/blk-mq-tag.h | 2 ++ block/blk-mq.c | 87 +++++++++++++++++++++++++++++----------------- 3 files changed, 73 insertions(+), 31 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 72a2724a4eee..c43b97201161 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -86,6 +86,21 @@ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, return __sbitmap_queue_get(bt); } +unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, + unsigned int *offset) +{ + struct blk_mq_tags *tags = blk_mq_tags_from_data(data); + struct sbitmap_queue *bt = &tags->bitmap_tags; + unsigned long ret; + + if (data->shallow_depth ||data->flags & BLK_MQ_REQ_RESERVED || + data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) + return 0; + ret = __sbitmap_queue_get_batch(bt, nr_tags, offset); + *offset += tags->nr_reserved_tags; + return ret; +} + unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) { struct blk_mq_tags *tags = blk_mq_tags_from_data(data); diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index d8ce89fa1686..71c2f7d8e9b7 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -38,6 +38,8 @@ extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, int node, int alloc_policy); extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); +unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, + unsigned int *offset); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, diff --git a/block/blk-mq.c b/block/blk-mq.c index 568773de9a7f..97b911866de1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -354,6 +354,38 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, return rq; } +static inline struct request * +__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data, + u64 alloc_time_ns) +{ + unsigned int tag, tag_offset; + struct request *rq; + unsigned long tags; + int i, nr = 0; + + tags = blk_mq_get_tags(data, data->nr_tags, &tag_offset); + if (unlikely(!tags)) + return NULL; + + for (i = 0; tags; i++) { + if (!(tags & (1UL << i))) + continue; + tag = tag_offset + i; + tags &= ~(1UL << i); + rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + rq->rq_next = *data->cached_rq; + *data->cached_rq = rq; + } + data->nr_tags -= nr; + + if (!data->cached_rq) + return NULL; + + rq = *data->cached_rq; + *data->cached_rq = rq->rq_next; + return rq; +} + static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) { struct request_queue *q = data->q; @@ -388,43 +420,36 @@ retry: if (!e) blk_mq_tag_busy(data->hctx); + /* + * Try batched alloc if we want more than 1 tag. + */ + if (data->nr_tags > 1) { + rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns); + if (rq) + return rq; + data->nr_tags = 1; + } + /* * Waiting allocations only fail because of an inactive hctx. In that * case just retry the hctx assignment and tag allocation as CPU hotplug * should have migrated us to an online CPU by now. */ - do { - tag = blk_mq_get_tag(data); - if (tag == BLK_MQ_NO_TAG) { - if (data->flags & BLK_MQ_REQ_NOWAIT) - break; - /* - * Give up the CPU and sleep for a random short time to - * ensure that thread using a realtime scheduling class - * are migrated off the CPU, and thus off the hctx that - * is going away. 
- */ - msleep(3); - goto retry; - } + tag = blk_mq_get_tag(data); + if (tag == BLK_MQ_NO_TAG) { + if (data->flags & BLK_MQ_REQ_NOWAIT) + return NULL; + /* + * Give up the CPU and sleep for a random short time to + * ensure that thread using a realtime scheduling class + * are migrated off the CPU, and thus off the hctx that + * is going away. + */ + msleep(3); + goto retry; + } - rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); - if (!--data->nr_tags || e || - (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) - return rq; - - /* link into the cached list */ - rq->rq_next = *data->cached_rq; - *data->cached_rq = rq; - data->flags |= BLK_MQ_REQ_NOWAIT; - } while (1); - - if (!data->cached_rq) - return NULL; - - rq = *data->cached_rq; - *data->cached_rq = rq->rq_next; - return rq; + return blk_mq_rq_ctx_init(data, tag, alloc_time_ns); } struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, From df252bde82ac19324b26192ea5e7527fbc1b6033 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Sep 2021 22:59:57 +0900 Subject: [PATCH 061/174] block: remove redundant =y from BLK_CGROUP dependency CONFIG_BLK_CGROUP is a boolean option, that is, its value is 'y' or 'n'. The comparison to 'y' is redundant. Signed-off-by: Masahiro Yamada Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210927140000.866249-2-masahiroy@kernel.org Signed-off-by: Jens Axboe --- block/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/block/Kconfig b/block/Kconfig index 8e28ae7718bd..1d83504749e7 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -73,7 +73,7 @@ config BLK_DEV_ZONED config BLK_DEV_THROTTLING bool "Block layer bio throttling support" - depends on BLK_CGROUP=y + depends on BLK_CGROUP select BLK_CGROUP_RWSTAT help Block layer bio throttling support. It can be used to limit @@ -112,7 +112,7 @@ config BLK_WBT_MQ config BLK_CGROUP_IOLATENCY bool "Enable support for latency based cgroup IO protection" - depends on BLK_CGROUP=y + depends on BLK_CGROUP help Enabling this option enables the .latency interface for IO throttling. The IO controller will attempt to maintain average IO latencies below @@ -132,7 +132,7 @@ config BLK_CGROUP_FC_APPID config BLK_CGROUP_IOCOST bool "Enable support for cost model based cgroup IO controller" - depends on BLK_CGROUP=y + depends on BLK_CGROUP select BLK_RQ_IO_DATA_LEN select BLK_RQ_ALLOC_TIME help From c50fca55d4395ae27a57dee820f6df9b6a26c295 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Sep 2021 22:59:58 +0900 Subject: [PATCH 062/174] block: simplify Kconfig files Everything under block/ depends on BLOCK. BLOCK_HOLDER_DEPRECATED is selected from drivers/md/Kconfig, which is entirely dependent on BLOCK. Extend the 'if BLOCK' ... 'endif' so it covers the whole block/Kconfig. Also, clean up the definition of BLOCK_COMPAT and BLK_MQ_PCI because COMPAT and PCI are boolean. 
Signed-off-by: Masahiro Yamada Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210927140000.866249-3-masahiroy@kernel.org Signed-off-by: Jens Axboe --- block/Kconfig | 18 +++++++----------- block/Kconfig.iosched | 4 ---- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/block/Kconfig b/block/Kconfig index 1d83504749e7..c4d35829ea4f 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -196,33 +196,29 @@ source "block/partitions/Kconfig" endmenu -endif # BLOCK - config BLOCK_COMPAT - bool - depends on BLOCK && COMPAT - default y + def_bool COMPAT config BLK_MQ_PCI - bool - depends on BLOCK && PCI - default y + def_bool PCI config BLK_MQ_VIRTIO bool - depends on BLOCK && VIRTIO + depends on VIRTIO default y config BLK_MQ_RDMA bool - depends on BLOCK && INFINIBAND + depends on INFINIBAND default y config BLK_PM - def_bool BLOCK && PM + def_bool PM # do not use in new code config BLOCK_HOLDER_DEPRECATED bool source "block/Kconfig.iosched" + +endif # BLOCK diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 2f2158e05a91..885fee86dfca 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -if BLOCK - menu "IO Schedulers" config MQ_IOSCHED_DEADLINE @@ -45,5 +43,3 @@ config BFQ_CGROUP_DEBUG files in a cgroup which can be useful for debugging. endmenu - -endif From b8b98a6225c9140ff5c2b3dce99a70ffba98e6d3 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Sep 2021 22:59:59 +0900 Subject: [PATCH 063/174] block: move menu "Partition type" to block/partitions/Kconfig Move the menu to the relevant place. Signed-off-by: Masahiro Yamada Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210927140000.866249-4-masahiroy@kernel.org Signed-off-by: Jens Axboe --- block/Kconfig | 4 ---- block/partitions/Kconfig | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/block/Kconfig b/block/Kconfig index c4d35829ea4f..c6ce41a5e5b2 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -190,12 +190,8 @@ config BLK_INLINE_ENCRYPTION_FALLBACK by falling back to the kernel crypto API when inline encryption hardware is not present. -menu "Partition Types" - source "block/partitions/Kconfig" -endmenu - config BLOCK_COMPAT def_bool COMPAT diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig index 278593b8e4e9..7aff4eb81c60 100644 --- a/block/partitions/Kconfig +++ b/block/partitions/Kconfig @@ -2,6 +2,8 @@ # # Partition configuration # +menu "Partition Types" + config PARTITION_ADVANCED bool "Advanced partition selection" help @@ -267,3 +269,5 @@ config CMDLINE_PARTITION help Say Y here if you want to read the partition table from bootargs. The format for the command line is just like mtdparts. + +endmenu From 4c928904ff771a8e830773b71a080047365324a5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Sep 2021 23:00:00 +0900 Subject: [PATCH 064/174] block: move CONFIG_BLOCK guard to top Makefile Every object under block/ depends on CONFIG_BLOCK. Move the guard to the top Makefile since there is no point to descend into block/ if CONFIG_BLOCK=n. 
Signed-off-by: Masahiro Yamada Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210927140000.866249-5-masahiroy@kernel.org Signed-off-by: Jens Axboe --- Makefile | 3 ++- block/Makefile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 91297670da8e..95ba2c34dc33 100644 --- a/Makefile +++ b/Makefile @@ -1115,7 +1115,8 @@ export MODORDER := $(extmod_prefix)modules.order export MODULES_NSDEPS := $(extmod_prefix)modules.nsdeps ifeq ($(KBUILD_EXTMOD),) -core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ +core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ +core-$(CONFIG_BLOCK) += block/ vmlinux-dirs := $(patsubst %/,%,$(filter %/, \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ diff --git a/block/Makefile b/block/Makefile index 41aa1ba69c90..74df168729ec 100644 --- a/block/Makefile +++ b/block/Makefile @@ -3,7 +3,7 @@ # Makefile for the kernel block layer # -obj-$(CONFIG_BLOCK) := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ +obj-y := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-exec.o blk-merge.o blk-timeout.o \ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \ From d38a9c04c0d5637a828269dccb9703d42d40d42b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 Oct 2021 07:24:07 -0600 Subject: [PATCH 065/174] block: only check previous entry for plug merge attempt Currently we scan the entire plug list, which is potentially very expensive. In an IOPS bound workload, we can drive about 5.6M IOPS with merging enabled, and profiling shows that the plug merge check is the (by far) most expensive thing we're doing: Overhead Command Shared Object Symbol + 20.89% io_uring [kernel.vmlinux] [k] blk_attempt_plug_merge + 4.98% io_uring [kernel.vmlinux] [k] io_submit_sqes + 4.78% io_uring [kernel.vmlinux] [k] blkdev_direct_IO + 4.61% io_uring [kernel.vmlinux] [k] blk_mq_submit_bio Instead of browsing the whole list, just check the previously inserted entry. That is enough for a naive merge check and will catch most cases, and for devices that need full merging, the IO scheduler attached to such devices will do that anyway. The plug merge is meant to be an inexpensive check to avoid getting a request, but if we repeatedly scan the list for every single insert, it is very much not a cheap check. With this patch, the workload instead runs at ~7.0M IOPS, providing a 25% improvement. Disabling merging entirely yields another 5% improvement. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-merge.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index 14ce19607cd8..9b77b4d6c2a1 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -1084,8 +1084,8 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, * another request associated with @q is found on the plug list * (optional, may be %NULL) * - * Determine whether @bio being queued on @q can be merged with a request - * on %current's plugged list. Returns %true if merge was successful, + * Determine whether @bio being queued on @q can be merged with the previous + * request on %current's plugged list. Returns %true if merge was successful, * otherwise %false. 
* * Plugging coalesces IOs from the same issuer for the same purpose without @@ -1102,32 +1102,22 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, { struct blk_plug *plug; struct request *rq; - struct list_head *plug_list; plug = blk_mq_plug(q, bio); - if (!plug) + if (!plug || list_empty(&plug->mq_list)) return false; - plug_list = &plug->mq_list; - - list_for_each_entry_reverse(rq, plug_list, queuelist) { - if (rq->q == q && same_queue_rq) { - /* - * Only blk-mq multiple hardware queues case checks the - * rq in the same queue, there should be only one such - * rq in a queue - **/ - *same_queue_rq = rq; - } - - if (rq->q != q) - continue; - - if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == - BIO_MERGE_OK) - return true; + /* check the previously added entry for a quick merge attempt */ + rq = list_last_entry(&plug->mq_list, struct request, queuelist); + if (rq->q == q && same_queue_rq) { + /* + * Only blk-mq multiple hardware queues case checks the rq in + * the same queue, there should be only one such rq in a queue + */ + *same_queue_rq = rq; } - + if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == BIO_MERGE_OK) + return true; return false; } From 94c2ed58d0d856a35c04365bdb39fee6e77547de Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:11 +0200 Subject: [PATCH 066/174] direct-io: remove blk_poll support The polling support in the legacy direct-io support is a little crufty. It already doesn't support the asynchronous polling needed for io_uring polling, and is hard to adopt to upcoming changes in the polling interfaces. Given that all the major file systems already use the iomap direct I/O code, just drop the polling support. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-2-hch@lst.de Signed-off-by: Jens Axboe --- fs/direct-io.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/fs/direct-io.c b/fs/direct-io.c index b2e86e739d7a..453dcff0e7f5 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -119,7 +119,6 @@ struct dio { int flags; /* doesn't change */ int op; int op_flags; - blk_qc_t bio_cookie; struct gendisk *bio_disk; struct inode *inode; loff_t i_size; /* i_size when submitted */ @@ -438,11 +437,10 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) dio->bio_disk = bio->bi_bdev->bd_disk; - if (sdio->submit_io) { + if (sdio->submit_io) sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); - dio->bio_cookie = BLK_QC_T_NONE; - } else - dio->bio_cookie = submit_bio(bio); + else + submit_bio(bio); sdio->bio = NULL; sdio->boundary = 0; @@ -481,9 +479,7 @@ static struct bio *dio_await_one(struct dio *dio) __set_current_state(TASK_UNINTERRUPTIBLE); dio->waiter = current; spin_unlock_irqrestore(&dio->bio_lock, flags); - if (!(dio->iocb->ki_flags & IOCB_HIPRI) || - !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true)) - blk_io_schedule(); + blk_io_schedule(); /* wake up sets us TASK_RUNNING */ spin_lock_irqsave(&dio->bio_lock, flags); dio->waiter = NULL; @@ -1214,8 +1210,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, } else { dio->op = REQ_OP_READ; } - if (iocb->ki_flags & IOCB_HIPRI) - dio->op_flags |= REQ_HIPRI; /* * For AIO O_(D)SYNC writes we need to defer completions to a workqueue From 71fc3f5e2c00c966e6a2ffebadfbcc6914249d32 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:12 +0200 Subject: [PATCH 067/174] block: don't try to poll 
multi-bio I/Os in __blkdev_direct_IO If an iocb is split into multiple bios we can't poll for both. So don't even bother to try to poll in that case. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012111226.760968-3-hch@lst.de Signed-off-by: Jens Axboe --- block/fops.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/block/fops.c b/block/fops.c index 7bb9581a146c..15324f2e5a91 100644 --- a/block/fops.c +++ b/block/fops.c @@ -197,7 +197,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, struct blk_plug plug; struct blkdev_dio *dio; struct bio *bio; - bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; + bool do_poll = (iocb->ki_flags & IOCB_HIPRI); bool is_read = (iov_iter_rw(iter) == READ), is_sync; loff_t pos = iocb->ki_pos; blk_qc_t qc = BLK_QC_T_NONE; @@ -226,7 +226,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, * Don't plug for HIPRI/polled IO, as those should go straight * to issue */ - if (!is_poll) + if (!(iocb->ki_flags & IOCB_HIPRI)) blk_start_plug(&plug); for (;;) { @@ -260,20 +260,13 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS); if (!nr_pages) { - bool polled = false; - - if (iocb->ki_flags & IOCB_HIPRI) { + if (do_poll) bio_set_polled(bio, iocb); - polled = true; - } - qc = submit_bio(bio); - - if (polled) + if (do_poll) WRITE_ONCE(iocb->ki_cookie, qc); break; } - if (!dio->multi_bio) { /* * AIO needs an extra reference to ensure the dio @@ -284,6 +277,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio_get(bio); dio->multi_bio = true; atomic_set(&dio->ref, 2); + do_poll = false; } else { atomic_inc(&dio->ref); } @@ -292,7 +286,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio = bio_alloc(GFP_KERNEL, nr_pages); } - if (!is_poll) + if (!(iocb->ki_flags & IOCB_HIPRI)) blk_finish_plug(&plug); if (!is_sync) @@ -303,8 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->waiter)) break; - if (!(iocb->ki_flags & IOCB_HIPRI) || - !blk_poll(bdev_get_queue(bdev), qc, true)) + if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); From f79d474905fec0bfae1244e75571b7916fe02ea2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:13 +0200 Subject: [PATCH 068/174] iomap: don't try to poll multi-bio I/Os in __iomap_dio_rw If an iocb is split into multiple bios we can't poll for both. So don't bother to even try to poll in that case. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-4-hch@lst.de Signed-off-by: Jens Axboe --- fs/iomap/direct-io.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 4ecd255e0511..560ae967f70e 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -282,6 +282,13 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter, if (!iov_iter_count(dio->submit.iter)) goto out; + /* + * We can only poll for single bio I/Os. 
+ */ + if (need_zeroout || + ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) + dio->iocb->ki_flags &= ~IOCB_HIPRI; + if (need_zeroout) { /* zero out from the start of the block to the write offset */ pad = pos & (fs_block_size - 1); @@ -339,6 +346,11 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter, nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS); + /* + * We can only poll for single bio I/Os. + */ + if (nr_pages) + dio->iocb->ki_flags &= ~IOCB_HIPRI; iomap_dio_submit_bio(iter, dio, bio, pos); pos += n; } while (nr_pages); @@ -565,8 +577,15 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, inode_dio_begin(inode); blk_start_plug(&plug); - while ((ret = iomap_iter(&iomi, ops)) > 0) + while ((ret = iomap_iter(&iomi, ops)) > 0) { iomi.processed = iomap_dio_iter(&iomi, dio); + + /* + * We can only poll for single bio I/Os. + */ + iocb->ki_flags &= ~IOCB_HIPRI; + } + blk_finish_plug(&plug); /* From 30da1b45b130c70945b033900f45c9d61d6f3b4a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:14 +0200 Subject: [PATCH 069/174] io_uring: fix a layering violation in io_iopoll_req_issued syscall-level code can't just poke into the details of the poll cookie, which is private information of the block layer. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012111226.760968-5-hch@lst.de Signed-off-by: Jens Axboe --- fs/io_uring.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index e68d27829bb2..d2e86788c872 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2738,19 +2738,12 @@ static void io_iopoll_req_issued(struct io_kiocb *req) ctx->poll_multi_queue = false; } else if (!ctx->poll_multi_queue) { struct io_kiocb *list_req; - unsigned int queue_num0, queue_num1; list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, inflight_entry); - if (list_req->file != req->file) { + if (list_req->file != req->file) ctx->poll_multi_queue = true; - } else { - queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie); - queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie); - if (queue_num0 != queue_num1) - ctx->poll_multi_queue = true; - } } /* From f70299f0d58e0e21f7f5f5ab27e601e8d3f0373e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:15 +0200 Subject: [PATCH 070/174] blk-mq: factor out a blk_qc_to_hctx helper Add a helper to get the hctx from a request_queue and cookie, and fold the blk_qc_t_to_queue_num helper into it as no other callers are left. Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-6-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 +++++++- include/linux/blk_types.h | 5 ----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 97b911866de1..35b2ab0a373a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -65,6 +65,12 @@ static int blk_mq_poll_stats_bkt(const struct request *rq) return bucket; } +static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, + blk_qc_t qc) +{ + return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT]; +} + /* * Check if any of the ctx, dispatch list or elevator * have pending work in this hardware queue. 
@@ -4071,7 +4077,7 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) if (current->plug) blk_flush_plug_list(current->plug, false); - hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; + hctx = blk_qc_to_hctx(q, cookie); /* * If we sleep, have the caller restart the poll loop to reset diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 3b967053e9f5..000351c5312a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -505,11 +505,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie) return cookie != BLK_QC_T_NONE; } -static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) -{ - return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; -} - static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) { return cookie & ((1u << BLK_QC_T_SHIFT) - 1); From c6699d6fe0ffe4d9fdc652d1acf5a94b4f9627ba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:16 +0200 Subject: [PATCH 071/174] blk-mq: factor out a "classic" poll helper Factor the code to do the classic full metal polling out of blk_poll into a separate blk_mq_poll_classic helper. Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-7-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 120 +++++++++++++++++++++++-------------------------- 1 file changed, 56 insertions(+), 64 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 35b2ab0a373a..636773056874 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -71,6 +71,14 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT]; } +static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, + blk_qc_t qc) +{ + if (blk_qc_t_is_internal(qc)) + return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc)); + return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc)); +} + /* * Check if any of the ctx, dispatch list or elevator * have pending work in this hardware queue. @@ -3975,15 +3983,20 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q, return ret; } -static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, - struct request *rq) +static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc) { + struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc); + struct request *rq = blk_qc_to_rq(hctx, qc); struct hrtimer_sleeper hs; enum hrtimer_mode mode; unsigned int nsecs; ktime_t kt; - if (rq->rq_flags & RQF_MQ_POLL_SLEPT) + /* + * If a request has completed on queue that uses an I/O scheduler, we + * won't get back a request from blk_qc_to_rq. + */ + if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT)) return false; /* @@ -4025,32 +4038,48 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, __set_current_state(TASK_RUNNING); destroy_hrtimer_on_stack(&hs.timer); + + /* + * If we sleep, have the caller restart the poll loop to reset the + * state. Like for the other success return cases, the caller is + * responsible for checking if the IO completed. If the IO isn't + * complete, we'll get called again and will go straight to the busy + * poll loop. 
+ */ return true; } -static bool blk_mq_poll_hybrid(struct request_queue *q, - struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) +static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, + bool spin) { - struct request *rq; + struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie); + long state = get_current_state(); + int ret; - if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) - return false; + hctx->poll_considered++; - if (!blk_qc_t_is_internal(cookie)) - rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); - else { - rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); - /* - * With scheduling, if the request has completed, we'll - * get a NULL return here, as we clear the sched tag when - * that happens. The request still remains valid, like always, - * so we should be safe with just the NULL check. - */ - if (!rq) - return false; - } + do { + hctx->poll_invoked++; - return blk_mq_poll_hybrid_sleep(q, rq); + ret = q->mq_ops->poll(hctx); + if (ret > 0) { + hctx->poll_success++; + __set_current_state(TASK_RUNNING); + return ret; + } + + if (signal_pending_state(state, current)) + __set_current_state(TASK_RUNNING); + if (task_is_running(current)) + return 1; + + if (ret < 0 || !spin) + break; + cpu_relax(); + } while (!need_resched()); + + __set_current_state(TASK_RUNNING); + return 0; } /** @@ -4067,9 +4096,6 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, */ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) { - struct blk_mq_hw_ctx *hctx; - unsigned int state; - if (!blk_qc_t_valid(cookie) || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return 0; @@ -4077,46 +4103,12 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) if (current->plug) blk_flush_plug_list(current->plug, false); - hctx = blk_qc_to_hctx(q, cookie); - - /* - * If we sleep, have the caller restart the poll loop to reset - * the state. Like for the other success return cases, the - * caller is responsible for checking if the IO completed. If - * the IO isn't complete, we'll get called again and will go - * straight to the busy poll loop. If specified not to spin, - * we also should not sleep. - */ - if (spin && blk_mq_poll_hybrid(q, hctx, cookie)) - return 1; - - hctx->poll_considered++; - - state = get_current_state(); - do { - int ret; - - hctx->poll_invoked++; - - ret = q->mq_ops->poll(hctx); - if (ret > 0) { - hctx->poll_success++; - __set_current_state(TASK_RUNNING); - return ret; - } - - if (signal_pending_state(state, current)) - __set_current_state(TASK_RUNNING); - - if (task_is_running(current)) + /* If specified not to spin, we also should not sleep. */ + if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) { + if (blk_mq_poll_hybrid(q, cookie)) return 1; - if (ret < 0 || !spin) - break; - cpu_relax(); - } while (!need_resched()); - - __set_current_state(TASK_RUNNING); - return 0; + } + return blk_mq_poll_classic(q, cookie, spin); } EXPORT_SYMBOL_GPL(blk_poll); From efbabbe121f96d4b1a98a2c2ef5d2e8f7fb41c87 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:17 +0200 Subject: [PATCH 072/174] blk-mq: remove blk_qc_t_to_tag and blk_qc_t_is_internal Merge both functions into their only caller to keep the blk-mq tag to blk_qc_t mapping as private as possible in blk-mq.c. 
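For reference, the mapping now private to blk-mq.c is plain bit packing: the tag sits in the low BLK_QC_T_SHIFT bits, the hardware queue number above them, and BLK_QC_T_INTERNAL marks a scheduler tag. A userspace model of that layout (make_qc is a hypothetical encoder for illustration, not the kernel's request_to_qc_t):

#include <stdio.h>

#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

/* hypothetical encoder matching the layout described above */
static unsigned int make_qc(unsigned int hctx_nr, unsigned int tag,
			    int internal)
{
	return (hctx_nr << BLK_QC_T_SHIFT) | tag |
	       (internal ? BLK_QC_T_INTERNAL : 0);
}

int main(void)
{
	unsigned int qc = make_qc(3, 42, 1);

	/* the same decodes blk_qc_to_hctx() and blk_qc_to_rq() perform */
	printf("hctx %u tag %u internal %d\n",
	       (qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT,
	       qc & ((1U << BLK_QC_T_SHIFT) - 1),
	       !!(qc & BLK_QC_T_INTERNAL));
	return 0;
}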
Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-8-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 +++++--- include/linux/blk_types.h | 10 ---------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 636773056874..24a10c973f79 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -74,9 +74,11 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, blk_qc_t qc) { - if (blk_qc_t_is_internal(qc)) - return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc)); - return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc)); + unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1); + + if (qc & BLK_QC_T_INTERNAL) + return blk_mq_tag_to_rq(hctx->sched_tags, tag); + return blk_mq_tag_to_rq(hctx->tags, tag); } /* diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 000351c5312a..fb7c1477617b 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -505,16 +505,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie) return cookie != BLK_QC_T_NONE; } -static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) -{ - return cookie & ((1u << BLK_QC_T_SHIFT) - 1); -} - -static inline bool blk_qc_t_is_internal(blk_qc_t cookie) -{ - return (cookie & BLK_QC_T_INTERNAL) != 0; -} - struct blk_rq_stat { u64 mean; u64 min; From 28a1ae6b9daba6ac65700eeb38479bd6fadec089 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:18 +0200 Subject: [PATCH 073/174] blk-mq: remove blk_qc_t_valid Move the trivial check into the only caller. Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-9-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- include/linux/blk_types.h | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 24a10c973f79..7d0d947921a6 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4098,7 +4098,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, */ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) { - if (!blk_qc_t_valid(cookie) || + if (cookie == BLK_QC_T_NONE || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return 0; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index fb7c1477617b..5017ba8fc539 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -500,11 +500,6 @@ typedef unsigned int blk_qc_t; #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) -static inline bool blk_qc_t_valid(blk_qc_t cookie) -{ - return cookie != BLK_QC_T_NONE; -} - struct blk_rq_stat { u64 mean; u64 min; From ef99b2d37666b7a600baab9e1c4944436652b0a2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:19 +0200 Subject: [PATCH 074/174] block: replace the spin argument to blk_iopoll with a flags argument Switch the boolean spin argument to blk_poll to passing a set of flags instead. This will allow to control polling behavior in a more fine grained way. 
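The semantics the new flag encodes can be sketched in isolation: with BLK_POLL_ONESHOT the poller checks the hardware once and returns, while without it the loop keeps spinning until a completion shows up. A toy standalone model (hw_check is a stub that pretends a completion lands on the third check):

#include <stdio.h>

#define BLK_POLL_ONESHOT	(1 << 0)

static int checks;

static int hw_check(void)		/* stub hardware completion check */
{
	return ++checks >= 3;
}

static int poll_completions(unsigned int flags)
{
	do {
		if (hw_check())
			return 1;	/* found a completion */
	} while (!(flags & BLK_POLL_ONESHOT));
	return 0;			/* oneshot: gave up after one look */
}

int main(void)
{
	printf("oneshot: %d\n", poll_completions(BLK_POLL_ONESHOT));
	checks = 0;
	printf("spin:    %d\n", poll_completions(0));
	return 0;
}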
Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-10-hch@lst.de [axboe: adapt to changed io_uring iopoll] Signed-off-by: Jens Axboe --- block/blk-exec.c | 2 +- block/blk-mq.c | 17 +++++++---------- block/fops.c | 8 ++++---- fs/io_uring.c | 9 +++++---- fs/iomap/direct-io.c | 6 +++--- include/linux/blkdev.h | 4 +++- include/linux/fs.h | 2 +- include/linux/iomap.h | 2 +- mm/page_io.c | 2 +- 9 files changed, 26 insertions(+), 26 deletions(-) diff --git a/block/blk-exec.c b/block/blk-exec.c index d6cd501c0d34..1fa7f25e5726 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -71,7 +71,7 @@ static bool blk_rq_is_poll(struct request *rq) static void blk_rq_poll_completion(struct request *rq, struct completion *wait) { do { - blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true); + blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0); cond_resched(); } while (!completion_done(wait)); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 7d0d947921a6..6609e10657a8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4052,7 +4052,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc) } static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, - bool spin) + unsigned int flags) { struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie); long state = get_current_state(); @@ -4075,7 +4075,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, if (task_is_running(current)) return 1; - if (ret < 0 || !spin) + if (ret < 0 || (flags & BLK_POLL_ONESHOT)) break; cpu_relax(); } while (!need_resched()); @@ -4088,15 +4088,13 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, * blk_poll - poll for IO completions * @q: the queue * @cookie: cookie passed back at IO submission time - * @spin: whether to spin for completions + * @flags: BLK_POLL_* flags that control the behavior * * Description: * Poll for completions on the passed in queue. Returns number of - * completed entries found. If @spin is true, then blk_poll will continue - * looping until at least one completion is found, unless the task is - * otherwise marked running (or we need to reschedule). + * completed entries found. */ -int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) +int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) { if (cookie == BLK_QC_T_NONE || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) @@ -4105,12 +4103,11 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) if (current->plug) blk_flush_plug_list(current->plug, false); - /* If specified not to spin, we also should not sleep. 
*/ - if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) { + if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) { if (blk_mq_poll_hybrid(q, cookie)) return 1; } - return blk_mq_poll_classic(q, cookie, spin); + return blk_mq_poll_classic(q, cookie, flags); } EXPORT_SYMBOL_GPL(blk_poll); diff --git a/block/fops.c b/block/fops.c index 15324f2e5a91..db8f2fe68dd2 100644 --- a/block/fops.c +++ b/block/fops.c @@ -108,7 +108,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, if (!READ_ONCE(bio.bi_private)) break; if (!(iocb->ki_flags & IOCB_HIPRI) || - !blk_poll(bdev_get_queue(bdev), qc, true)) + !blk_poll(bdev_get_queue(bdev), qc, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -141,12 +141,12 @@ struct blkdev_dio { static struct bio_set blkdev_dio_pool; -static int blkdev_iopoll(struct kiocb *kiocb, bool wait) +static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags) { struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); struct request_queue *q = bdev_get_queue(bdev); - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait); + return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); } static void blkdev_bio_end_io(struct bio *bio) @@ -297,7 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->waiter)) break; - if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true)) + if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/fs/io_uring.c b/fs/io_uring.c index d2e86788c872..541fec2bd49a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2457,14 +2457,15 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, long min) { struct io_kiocb *req, *tmp; + unsigned int poll_flags = 0; LIST_HEAD(done); - bool spin; /* * Only spin for completions if we don't have multiple devices hanging * off our complete list, and we're under the requested amount. 
*/ - spin = !ctx->poll_multi_queue && *nr_events < min; + if (ctx->poll_multi_queue || *nr_events >= min) + poll_flags |= BLK_POLL_ONESHOT; list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { struct kiocb *kiocb = &req->rw.kiocb; @@ -2482,11 +2483,11 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (!list_empty(&done)) break; - ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin); + ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags); if (unlikely(ret < 0)) return ret; else if (ret) - spin = false; + poll_flags |= BLK_POLL_ONESHOT; /* iopoll may have completed current req */ if (READ_ONCE(req->iopoll_completed)) diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 560ae967f70e..236aba256cd1 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -49,13 +49,13 @@ struct iomap_dio { }; }; -int iomap_dio_iopoll(struct kiocb *kiocb, bool spin) +int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags) { struct request_queue *q = READ_ONCE(kiocb->private); if (!q) return 0; - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin); + return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); } EXPORT_SYMBOL_GPL(iomap_dio_iopoll); @@ -642,7 +642,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (!(iocb->ki_flags & IOCB_HIPRI) || !dio->submit.last_queue || !blk_poll(dio->submit.last_queue, - dio->submit.cookie, true)) + dio->submit.cookie, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 17705c970d7e..e177346bc020 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -564,7 +564,9 @@ extern const char *blk_op_str(unsigned int op); int blk_status_to_errno(blk_status_t status); blk_status_t errno_to_blk_status(int errno); -int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin); +/* only poll the hardware once, don't continue until a completion was found */ +#define BLK_POLL_ONESHOT (1 << 0) +int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { diff --git a/include/linux/fs.h b/include/linux/fs.h index e7a633353fd2..c443cddf414f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2075,7 +2075,7 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); - int (*iopoll)(struct kiocb *kiocb, bool spin); + int (*iopoll)(struct kiocb *kiocb, unsigned int flags); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 24f8489583ca..1e86b65567c2 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -337,7 +337,7 @@ struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags); ssize_t iomap_dio_complete(struct iomap_dio *dio); -int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); +int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags); #ifdef CONFIG_SWAP struct file; diff --git a/mm/page_io.c b/mm/page_io.c index c493ce9ebcf5..5d5543fcefa4 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -428,7 +428,7 @@ int swap_readpage(struct page *page, bool synchronous) 
if (!READ_ONCE(bio->bi_private)) break; - if (!blk_poll(disk->queue, qc, true)) + if (!blk_poll(disk->queue, qc, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); From d729cf9acb9311956c8a37113dcfa0160a2d9665 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:20 +0200 Subject: [PATCH 075/174] io_uring: don't sleep when polling for I/O There is no point in sleeping for the expected I/O completion timeout in the io_uring async polling model as we never poll for a specific I/O. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-11-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 3 ++- fs/io_uring.c | 2 +- include/linux/blkdev.h | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 6609e10657a8..97c24e461d0a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4103,7 +4103,8 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) if (current->plug) blk_flush_plug_list(current->plug, false); - if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) { + if (!(flags & BLK_POLL_NOSLEEP) && + q->poll_nsec != BLK_MQ_POLL_CLASSIC) { if (blk_mq_poll_hybrid(q, cookie)) return 1; } diff --git a/fs/io_uring.c b/fs/io_uring.c index 541fec2bd49a..c5066146b8de 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2457,7 +2457,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, long min) { struct io_kiocb *req, *tmp; - unsigned int poll_flags = 0; + unsigned int poll_flags = BLK_POLL_NOSLEEP; LIST_HEAD(done); /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e177346bc020..2b80c98fc373 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -566,6 +566,8 @@ blk_status_t errno_to_blk_status(int errno); /* only poll the hardware once, don't continue until a completion was found */ #define BLK_POLL_ONESHOT (1 << 0) +/* do not sleep to wait for the expected completion time */ +#define BLK_POLL_NOSLEEP (1 << 1) int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) From 6ce913fe3eee14f40f778e85999c9e599dda8c6b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:21 +0200 Subject: [PATCH 076/174] block: rename REQ_HIPRI to REQ_POLLED Unlike the RWF_HIPRI userspace ABI which is intentionally kept vague, the bio flag is specific to the polling implementation, so rename and document it properly. 
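Restated as a hedged sketch, the contract behind the renamed flag has two sides (REQ_POLLED, bi_opf and cmd_flags are from the patch; the fragments are illustrative, not complete functions):

	/* Submitter: mark the bio so that its completion will be reaped
	 * via blk_poll() rather than waited for. */
	bio->bi_opf |= REQ_POLLED;
	submit_bio(bio);

	/* Completer: a polled request must complete locally, never via an
	 * IPI; compare the blk_mq_complete_request_remote() hunk below. */
	if (rq->cmd_flags & REQ_POLLED)
		return false;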
Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-12-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-merge.c | 3 +-- block/blk-mq-debugfs.c | 2 +- block/blk-mq.c | 4 ++-- block/blk-mq.h | 4 ++-- block/blk.h | 4 ++-- drivers/nvme/host/core.c | 2 +- drivers/scsi/scsi_debug.c | 10 +++++----- include/linux/bio.h | 2 +- include/linux/blk_types.h | 4 ++-- mm/page_io.c | 2 +- 11 files changed, 19 insertions(+), 20 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 80fa7aa394c7..8eb0e08d5395 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -842,7 +842,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio) } if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) - bio_clear_hipri(bio); + bio_clear_polled(bio); switch (bio_op(bio)) { case REQ_OP_DISCARD: diff --git a/block/blk-merge.c b/block/blk-merge.c index 9b77b4d6c2a1..f88d7863f997 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -318,8 +318,7 @@ split: * iopoll in direct IO routine. Given performance gain of iopoll for * big IO can be trival, disable iopoll when split needed. */ - bio_clear_hipri(bio); - + bio_clear_polled(bio); return bio_split(bio, sectors, GFP_NOIO, bs); } diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 3daea160d670..409a8256d9ff 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -287,7 +287,7 @@ static const char *const cmd_flag_name[] = { CMD_FLAG_NAME(BACKGROUND), CMD_FLAG_NAME(NOWAIT), CMD_FLAG_NAME(NOUNMAP), - CMD_FLAG_NAME(HIPRI), + CMD_FLAG_NAME(POLLED), }; #undef CMD_FLAG_NAME diff --git a/block/blk-mq.c b/block/blk-mq.c index 97c24e461d0a..a34ffcf861c3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -732,7 +732,7 @@ bool blk_mq_complete_request_remote(struct request *rq) * For a polled request, always complete locallly, it's pointless * to redirect the completion. */ - if (rq->cmd_flags & REQ_HIPRI) + if (rq->cmd_flags & REQ_POLLED) return false; if (blk_mq_complete_need_ipi(rq)) { @@ -2278,7 +2278,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq_qos_throttle(q, bio); - hipri = bio->bi_opf & REQ_HIPRI; + hipri = bio->bi_opf & REQ_POLLED; plug = blk_mq_plug(q, bio); if (plug && plug->cached_rq) { diff --git a/block/blk-mq.h b/block/blk-mq.h index 5da970bb8865..a9fe01e14951 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -106,9 +106,9 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, enum hctx_type type = HCTX_TYPE_DEFAULT; /* - * The caller ensure that if REQ_HIPRI, poll must be enabled. + * The caller ensure that if REQ_POLLED, poll must be enabled. 
*/ - if (flags & REQ_HIPRI) + if (flags & REQ_POLLED) type = HCTX_TYPE_POLL; else if ((flags & REQ_OP_MASK) == REQ_OP_READ) type = HCTX_TYPE_READ; diff --git a/block/blk.h b/block/blk.h index cab8d659d8a6..fa05d3f07976 100644 --- a/block/blk.h +++ b/block/blk.h @@ -416,11 +416,11 @@ extern struct device_attribute dev_attr_events; extern struct device_attribute dev_attr_events_async; extern struct device_attribute dev_attr_events_poll_msecs; -static inline void bio_clear_hipri(struct bio *bio) +static inline void bio_clear_polled(struct bio *bio) { /* can't support alloc cache if we turn off polling */ bio_clear_flag(bio, BIO_PERCPU_CACHE); - bio->bi_opf &= ~REQ_HIPRI; + bio->bi_opf &= ~REQ_POLLED; } long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 3d444b13cd69..ae15cb714596 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -632,7 +632,7 @@ static inline void nvme_init_request(struct request *req, req->cmd_flags |= REQ_FAILFAST_DRIVER; if (req->mq_hctx->type == HCTX_TYPE_POLL) - req->cmd_flags |= REQ_HIPRI; + req->cmd_flags |= REQ_POLLED; nvme_clear_nvme_request(req); memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 66f507469a31..40b473eea357 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5384,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, { bool new_sd_dp; bool inject = false; - bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI; + bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED; int k, num_in_q, qdepth; unsigned long iflags; u64 ns_from_boot = 0; @@ -5471,7 +5471,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if (sdebug_host_max_queue) sd_dp->hc_idx = get_tag(cmnd); - if (hipri) + if (polled) ns_from_boot = ktime_get_boottime_ns(); /* one of the resp_*() response functions is called here */ @@ -5531,7 +5531,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, kt -= d; } } - if (hipri) { + if (polled) { sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); spin_lock_irqsave(&sqp->qc_lock, iflags); if (!sd_dp->init_poll) { @@ -5562,7 +5562,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) && atomic_read(&sdeb_inject_pending))) sd_dp->aborted = true; - if (hipri) { + if (polled) { sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); spin_lock_irqsave(&sqp->qc_lock, iflags); if (!sd_dp->init_poll) { @@ -7331,7 +7331,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) if (kt_from_boot < sd_dp->cmpl_ts) continue; - } else /* ignoring non REQ_HIPRI requests */ + } else /* ignoring non REQ_POLLED requests */ continue; devip = (struct sdebug_dev_info *)scp->device->hostdata; if (likely(devip)) diff --git a/include/linux/bio.h b/include/linux/bio.h index d8d27742a75f..c7a2d880e927 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -706,7 +706,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page, */ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) { - bio->bi_opf |= REQ_HIPRI; + bio->bi_opf |= REQ_POLLED; if (!is_sync_kiocb(kiocb)) bio->bi_opf |= REQ_NOWAIT; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 5017ba8fc539..f8b9fce68834 100644 --- 
a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -384,7 +384,7 @@ enum req_flag_bits { /* command specific flags for REQ_OP_WRITE_ZEROES: */ __REQ_NOUNMAP, /* do not free blocks when zeroing */ - __REQ_HIPRI, + __REQ_POLLED, /* caller polls for completion using blk_poll */ /* for driver use */ __REQ_DRV, @@ -409,7 +409,7 @@ enum req_flag_bits { #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) -#define REQ_HIPRI (1ULL << __REQ_HIPRI) +#define REQ_POLLED (1ULL << __REQ_POLLED) #define REQ_DRV (1ULL << __REQ_DRV) #define REQ_SWAP (1ULL << __REQ_SWAP) diff --git a/mm/page_io.c b/mm/page_io.c index 5d5543fcefa4..ed2eded74f3a 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -416,7 +416,7 @@ int swap_readpage(struct page *page, bool synchronous) * attempt to access it in the page fault retry time check. */ if (synchronous) { - bio->bi_opf |= REQ_HIPRI; + bio->bi_opf |= REQ_POLLED; get_task_struct(current); bio->bi_private = current; } From 1a7e76e4f130332b5d3b0c72c4f664e59deb1239 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:22 +0200 Subject: [PATCH 077/174] block: use SLAB_TYPESAFE_BY_RCU for the bio slab This flag ensures that the pages will not be reused for non-bio allocations before the end of an RCU grace period. With that we can safely use an RCU lookup for bio polling, as long as we are fine with occasionally polling the wrong device. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-13-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/bio.c b/block/bio.c index d5120451c36a..df45f4b996ac 100644 --- a/block/bio.c +++ b/block/bio.c @@ -87,7 +87,8 @@ static struct bio_slab *create_bio_slab(unsigned int size) snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size); bslab->slab = kmem_cache_create(bslab->name, size, - ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL); + ARCH_KMALLOC_MINALIGN, + SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL); if (!bslab->slab) goto fail_alloc_slab; From 19416123ab3e1348b3532347af221d8f60838431 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 12 Oct 2021 13:12:23 +0200 Subject: [PATCH 078/174] block: define 'struct bvec_iter' as packed 'struct bvec_iter' is embedded into 'struct bio'; defining it as packed gains an extra 4 bytes for other uses without expanding the bio. 'struct bvec_iter' is often allocated on the stack, so making it packed doesn't affect performance. I have also run io_uring on both nvme and null_blk and did not observe any performance effect from this change.
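To make the saving concrete, here is a standalone userspace sketch (assumptions: sector_t is modeled as a 64-bit integer, the field layout is copied from include/linux/bvec.h, and the _padded/_packed type names are illustrative, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t sector_t;	/* assumption: 64-bit sector numbers */

	/* Natural layout: the u64 member gives the struct 8-byte alignment,
	 * so its 20 bytes of fields are padded out to sizeof == 24. */
	struct bvec_iter_padded {
		sector_t	bi_sector;
		unsigned int	bi_size;
		unsigned int	bi_idx;
		unsigned int	bi_bvec_done;
	};

	/* Packed layout: alignment 1, no tail padding, sizeof == 20. These
	 * are the 4 bytes the patch frees up inside struct bio. */
	struct bvec_iter_packed {
		sector_t	bi_sector;
		unsigned int	bi_size;
		unsigned int	bi_idx;
		unsigned int	bi_bvec_done;
	} __attribute__((packed));

	int main(void)
	{
		printf("padded: %zu, packed: %zu\n",
		       sizeof(struct bvec_iter_padded),
		       sizeof(struct bvec_iter_packed));
		return 0;
	}

On an LP64 build this prints "padded: 24, packed: 20".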
Suggested-by: Christoph Hellwig Signed-off-by: Ming Lei Reviewed-by: Sagi Grimberg Reviewed-by: Hannes Reinecke Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-14-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/bvec.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 0e9bdd42dafb..35c25dff651a 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -44,7 +44,7 @@ struct bvec_iter { unsigned int bi_bvec_done; /* number of bytes completed in current bvec */ -}; +} __packed; struct bvec_iter_all { struct bio_vec bv; From 3e08773c3841e9db7a520908cc2b136a77d275ff Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:24 +0200 Subject: [PATCH 079/174] block: switch polling to be bio based Replace the blk_poll interface, which requires the caller to keep a queue and cookie from the submission, with polling based on the bio. Polling for the bio itself leads to a few advantages: - the cookie construction can be made entirely private in blk-mq.c - the caller does not need to remember the request_queue and cookie separately and thus sidesteps their lifetime issues - keeping the device and the cookie inside the bio makes it trivial to support polling of BIOs remapped by stacking drivers - a lot of code to propagate the cookie back up the submission path can be removed entirely. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-15-hch@lst.de Signed-off-by: Jens Axboe --- arch/m68k/emu/nfblock.c | 3 +- arch/xtensa/platforms/iss/simdisk.c | 3 +- block/bio.c | 1 + block/blk-core.c | 127 +++++++++++++++++++++------- block/blk-exec.c | 10 ++- block/blk-mq.c | 72 +++++----------- block/blk-mq.h | 2 + block/fops.c | 25 ++---- drivers/block/brd.c | 12 ++- drivers/block/drbd/drbd_int.h | 2 +- drivers/block/drbd/drbd_req.c | 3 +- drivers/block/n64cart.c | 12 ++- drivers/block/null_blk/main.c | 3 +- drivers/block/pktcdvd.c | 7 +- drivers/block/ps3vram.c | 6 +- drivers/block/rsxx/dev.c | 7 +- drivers/block/zram/zram_drv.c | 10 +-- drivers/md/bcache/request.c | 13 ++- drivers/md/bcache/request.h | 4 +- drivers/md/dm.c | 28 +++--- drivers/md/md.c | 10 +-- drivers/nvdimm/blk.c | 5 +- drivers/nvdimm/btt.c | 5 +- drivers/nvdimm/pmem.c | 3 +- drivers/nvme/host/multipath.c | 6 +- drivers/s390/block/dcssblk.c | 7 +- fs/btrfs/inode.c | 8 +- fs/ext4/file.c | 2 +- fs/gfs2/file.c | 4 +- fs/iomap/direct-io.c | 36 +++----- fs/xfs/xfs_file.c | 2 +- fs/zonefs/super.c | 2 +- include/linux/bio.h | 2 +- include/linux/blk-mq.h | 15 +--- include/linux/blk_types.h | 12 ++- include/linux/blkdev.h | 8 +- include/linux/fs.h | 6 +- include/linux/iomap.h | 5 +- mm/page_io.c | 8 +- 39 files changed, 232 insertions(+), 264 deletions(-) diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 9a8394e96388..4ef457ba5220 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -58,7 +58,7 @@ struct nfhd_device { struct gendisk *disk; }; -static blk_qc_t nfhd_submit_bio(struct bio *bio) +static void nfhd_submit_bio(struct bio *bio) { struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data; struct bio_vec bvec; @@ -76,7 +76,6 @@ static blk_qc_t nfhd_submit_bio(struct bio *bio) sec += len; } bio_endio(bio); - return BLK_QC_T_NONE; } static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo) diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 3cdfa00738e0..ddd1fe3db474 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -100,7 +100,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector, spin_unlock(&dev->lock); } -static blk_qc_t simdisk_submit_bio(struct bio *bio) +static void simdisk_submit_bio(struct bio *bio) { struct simdisk *dev = bio->bi_bdev->bd_disk->private_data; struct bio_vec bvec; @@ -118,7 +118,6 @@ static blk_qc_t simdisk_submit_bio(struct bio *bio) } bio_endio(bio); - return BLK_QC_T_NONE; } static int simdisk_open(struct block_device *bdev, fmode_t mode) diff --git a/block/bio.c b/block/bio.c index df45f4b996ac..a3c9ff23a036 100644 --- a/block/bio.c +++ b/block/bio.c @@ -282,6 +282,7 @@ void bio_init(struct bio *bio, struct bio_vec *table, atomic_set(&bio->__bi_remaining, 1); atomic_set(&bio->__bi_cnt, 1); + bio->bi_cookie = BLK_QC_T_NONE; bio->bi_max_vecs = max_vecs; bio->bi_io_vec = table; diff --git a/block/blk-core.c b/block/blk-core.c index 8eb0e08d5395..f008c38ae967 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -915,25 +915,22 @@ end_io: return false; } -static blk_qc_t __submit_bio(struct bio *bio) +static void __submit_bio(struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; - blk_qc_t ret = BLK_QC_T_NONE; if (unlikely(bio_queue_enter(bio) != 0)) - return BLK_QC_T_NONE; + return; if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio)) goto queue_exit; - if (disk->fops->submit_bio) { - ret = disk->fops->submit_bio(bio); - goto queue_exit; + if (!disk->fops->submit_bio) { + blk_mq_submit_bio(bio); + return; } - return blk_mq_submit_bio(bio); - + disk->fops->submit_bio(bio); queue_exit: blk_queue_exit(disk->queue); - return ret; } /* @@ -955,10 +952,9 @@ queue_exit: * bio_list_on_stack[1] contains bios that were submitted before the current * ->submit_bio_bio, but that haven't been processed yet. */ -static blk_qc_t __submit_bio_noacct(struct bio *bio) +static void __submit_bio_noacct(struct bio *bio) { struct bio_list bio_list_on_stack[2]; - blk_qc_t ret = BLK_QC_T_NONE; BUG_ON(bio->bi_next); @@ -975,7 +971,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio) bio_list_on_stack[1] = bio_list_on_stack[0]; bio_list_init(&bio_list_on_stack[0]); - ret = __submit_bio(bio); + __submit_bio(bio); /* * Sort new bios into those for a lower level and those for the @@ -998,22 +994,19 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio) } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); current->bio_list = NULL; - return ret; } -static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) +static void __submit_bio_noacct_mq(struct bio *bio) { struct bio_list bio_list[2] = { }; - blk_qc_t ret; current->bio_list = bio_list; do { - ret = __submit_bio(bio); + __submit_bio(bio); } while ((bio = bio_list_pop(&bio_list[0]))); current->bio_list = NULL; - return ret; } /** @@ -1025,7 +1018,7 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) * systems and other upper level users of the block layer should use * submit_bio() instead. */ -blk_qc_t submit_bio_noacct(struct bio *bio) +void submit_bio_noacct(struct bio *bio) { /* * We only want one ->submit_bio to be active at a time, else stack @@ -1033,14 +1026,12 @@ blk_qc_t submit_bio_noacct(struct bio *bio) * to collect a list of requests submited by a ->submit_bio method while * it is active, and then process them after it returned. 
*/ - if (current->bio_list) { bio_list_add(&current->bio_list[0], bio); - return BLK_QC_T_NONE; - } - - if (!bio->bi_bdev->bd_disk->fops->submit_bio) - return __submit_bio_noacct_mq(bio); - return __submit_bio_noacct(bio); + if (current->bio_list) bio_list_add(&current->bio_list[0], bio); + else if (!bio->bi_bdev->bd_disk->fops->submit_bio) + __submit_bio_noacct_mq(bio); + else + __submit_bio_noacct(bio); } EXPORT_SYMBOL(submit_bio_noacct); @@ -1057,10 +1048,10 @@ EXPORT_SYMBOL(submit_bio_noacct); * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has * been called. */ -blk_qc_t submit_bio(struct bio *bio) +void submit_bio(struct bio *bio) { if (blkcg_punt_bio_submit(bio)) - return BLK_QC_T_NONE; + return; /* * If it's a regular read/write or a barrier with data attached, @@ -1092,19 +1083,91 @@ blk_qc_t submit_bio(struct bio *bio) if (unlikely(bio_op(bio) == REQ_OP_READ && bio_flagged(bio, BIO_WORKINGSET))) { unsigned long pflags; - blk_qc_t ret; psi_memstall_enter(&pflags); - ret = submit_bio_noacct(bio); + submit_bio_noacct(bio); psi_memstall_leave(&pflags); - - return ret; + return; } - return submit_bio_noacct(bio); + submit_bio_noacct(bio); } EXPORT_SYMBOL(submit_bio); +/** + * bio_poll - poll for BIO completions + * @bio: bio to poll for + * @flags: BLK_POLL_* flags that control the behavior + * + * Poll for completions on the queue associated with the bio. Returns number of + * completed entries found. + * + * Note: the caller must either be the context that submitted @bio, or + * be in an RCU critical section to prevent freeing of @bio. + */ +int bio_poll(struct bio *bio, unsigned int flags) +{ + struct request_queue *q = bio->bi_bdev->bd_disk->queue; + blk_qc_t cookie = READ_ONCE(bio->bi_cookie); + int ret; + + if (cookie == BLK_QC_T_NONE || + !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + return 0; + + if (current->plug) + blk_flush_plug_list(current->plug, false); + + if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) + return 0; + if (WARN_ON_ONCE(!queue_is_mq(q))) + ret = 0; /* not yet implemented, should not happen */ + else + ret = blk_mq_poll(q, cookie, flags); + blk_queue_exit(q); + return ret; +} +EXPORT_SYMBOL_GPL(bio_poll); + +/* + * Helper to implement file_operations.iopoll. Requires the bio to be stored + * in iocb->private, and cleared before freeing the bio. + */ +int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags) +{ + struct bio *bio; + int ret = 0; + + /* + * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can + * point to a freshly allocated bio at this point. If that happens + * we have a few cases to consider: + * + * 1) the bio is being initialized and bi_bdev is NULL. We can simply + * do nothing in this case + * 2) the bio points to a device that does not have polling enabled. + * bio_poll will catch this and return 0 + * 3) the bio points to a poll capable device, including but not + * limited to the one that the original bio pointed to. In this + * case we will call into the actual poll method and poll for I/O, + * even if we don't need to, but it won't cause harm either. + * + * For cases 2) and 3) above the RCU grace period ensures that bi_bdev + * is still allocated. Because partitions hold a reference to the whole + * device bdev and thus disk, the disk is also still valid. Grabbing + * a reference to the queue in bio_poll() ensures the hctxs and requests + * are still valid as well.
+ */ + rcu_read_lock(); + bio = READ_ONCE(kiocb->private); + if (bio && bio->bi_bdev) + ret = bio_poll(bio, flags); + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL_GPL(iocb_bio_iopoll); + /** * blk_cloned_rq_check_limits - Helper function to check a cloned request * for the new queue limits diff --git a/block/blk-exec.c b/block/blk-exec.c index 1fa7f25e5726..55f0cd34b37b 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -65,13 +65,19 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); static bool blk_rq_is_poll(struct request *rq) { - return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL; + if (!rq->mq_hctx) + return false; + if (rq->mq_hctx->type != HCTX_TYPE_POLL) + return false; + if (WARN_ON_ONCE(!rq->bio)) + return false; + return true; } static void blk_rq_poll_completion(struct request *rq, struct completion *wait) { do { - blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0); + bio_poll(rq->bio, 0); cond_resched(); } while (!completion_done(wait)); } diff --git a/block/blk-mq.c b/block/blk-mq.c index a34ffcf861c3..0860f622099f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -65,6 +65,9 @@ static int blk_mq_poll_stats_bkt(const struct request *rq) return bucket; } +#define BLK_QC_T_SHIFT 16 +#define BLK_QC_T_INTERNAL (1U << 31) + static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, blk_qc_t qc) { @@ -81,6 +84,13 @@ static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, return blk_mq_tag_to_rq(hctx->tags, tag); } +static inline blk_qc_t blk_rq_to_qc(struct request *rq) +{ + return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) | + (rq->tag != -1 ? + rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL)); +} + /* * Check if any of the ctx, dispatch list or elevator * have pending work in this hardware queue. @@ -819,6 +829,8 @@ void blk_mq_start_request(struct request *rq) if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) q->integrity.profile->prepare_fn(rq); #endif + if (rq->bio && rq->bio->bi_opf & REQ_POLLED) + WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq)); } EXPORT_SYMBOL(blk_mq_start_request); @@ -2045,19 +2057,15 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, } static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, - struct request *rq, - blk_qc_t *cookie, bool last) + struct request *rq, bool last) { struct request_queue *q = rq->q; struct blk_mq_queue_data bd = { .rq = rq, .last = last, }; - blk_qc_t new_cookie; blk_status_t ret; - new_cookie = request_to_qc_t(hctx, rq); - /* * For OK queue, we are done. For error, caller may kill it. 
* Any other error (busy), just add it to our list as we @@ -2067,7 +2075,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, switch (ret) { case BLK_STS_OK: blk_mq_update_dispatch_busy(hctx, false); - *cookie = new_cookie; break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: @@ -2076,7 +2083,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, break; default: blk_mq_update_dispatch_busy(hctx, false); - *cookie = BLK_QC_T_NONE; break; } @@ -2085,7 +2091,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, - blk_qc_t *cookie, bool bypass_insert, bool last) { struct request_queue *q = rq->q; @@ -2119,7 +2124,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, goto insert; } - return __blk_mq_issue_directly(hctx, rq, cookie, last); + return __blk_mq_issue_directly(hctx, rq, last); insert: if (bypass_insert) return BLK_STS_RESOURCE; @@ -2133,7 +2138,6 @@ insert: * blk_mq_try_issue_directly - Try to send a request directly to device driver. * @hctx: Pointer of the associated hardware queue. * @rq: Pointer to request to be sent. - * @cookie: Request queue cookie. * * If the device has enough resources to accept a new request now, send the * request directly to device driver. Else, insert at hctx->dispatch queue, so @@ -2141,7 +2145,7 @@ insert: * queue have higher priority. */ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, - struct request *rq, blk_qc_t *cookie) + struct request *rq) { blk_status_t ret; int srcu_idx; @@ -2150,7 +2154,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, hctx_lock(hctx, &srcu_idx); - ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); + ret = __blk_mq_try_issue_directly(hctx, rq, false, true); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) blk_mq_request_bypass_insert(rq, false, true); else if (ret != BLK_STS_OK) @@ -2163,11 +2167,10 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) { blk_status_t ret; int srcu_idx; - blk_qc_t unused_cookie; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx_lock(hctx, &srcu_idx); - ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); + ret = __blk_mq_try_issue_directly(hctx, rq, true, last); hctx_unlock(hctx, srcu_idx); return ret; @@ -2247,10 +2250,8 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) * * It will not queue the request if there is an error with the bio, or at the * request creation. - * - * Returns: Request queue cookie. 
*/ -blk_qc_t blk_mq_submit_bio(struct bio *bio) +void blk_mq_submit_bio(struct bio *bio) { struct request_queue *q = bio->bi_bdev->bd_disk->queue; const int is_sync = op_is_sync(bio->bi_opf); @@ -2259,9 +2260,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) struct blk_plug *plug; struct request *same_queue_rq = NULL; unsigned int nr_segs; - blk_qc_t cookie; blk_status_t ret; - bool hipri; blk_queue_bounce(q, &bio); __blk_queue_split(&bio, &nr_segs); @@ -2278,8 +2277,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq_qos_throttle(q, bio); - hipri = bio->bi_opf & REQ_POLLED; - plug = blk_mq_plug(q, bio); if (plug && plug->cached_rq) { rq = plug->cached_rq; @@ -2310,8 +2307,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq_qos_track(q, rq, bio); - cookie = request_to_qc_t(rq->mq_hctx, rq); - blk_mq_bio_to_request(rq, bio, nr_segs); ret = blk_crypto_init_request(rq); @@ -2319,7 +2314,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) bio->bi_status = ret; bio_endio(bio); blk_mq_free_request(rq); - return BLK_QC_T_NONE; + return; } if (unlikely(is_flush_fua)) { @@ -2375,7 +2370,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) if (same_queue_rq) { trace_block_unplug(q, 1, true); blk_mq_try_issue_directly(same_queue_rq->mq_hctx, - same_queue_rq, &cookie); + same_queue_rq); } } else if ((q->nr_hw_queues > 1 && is_sync) || !rq->mq_hctx->dispatch_busy) { @@ -2383,18 +2378,15 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) * There is no scheduler and we can try to send directly * to the hardware. */ - blk_mq_try_issue_directly(rq->mq_hctx, rq, &cookie); + blk_mq_try_issue_directly(rq->mq_hctx, rq); } else { /* Default case. */ blk_mq_sched_insert_request(rq, false, true, true); } - if (!hipri) - return BLK_QC_T_NONE; - return cookie; + return; queue_exit: blk_queue_exit(q); - return BLK_QC_T_NONE; } static size_t order_to_size(unsigned int order) @@ -4084,25 +4076,8 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, return 0; } -/** - * blk_poll - poll for IO completions - * @q: the queue - * @cookie: cookie passed back at IO submission time - * @flags: BLK_POLL_* flags that control the behavior - * - * Description: - * Poll for completions on the passed in queue. Returns number of - * completed entries found. 
- */ -int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) { - if (cookie == BLK_QC_T_NONE || - !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) - return 0; - - if (current->plug) - blk_flush_plug_list(current->plug, false); - if (!(flags & BLK_POLL_NOSLEEP) && q->poll_nsec != BLK_MQ_POLL_CLASSIC) { if (blk_mq_poll_hybrid(q, cookie)) @@ -4110,7 +4085,6 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) } return blk_mq_poll_classic(q, cookie, flags); } -EXPORT_SYMBOL_GPL(blk_poll); unsigned int blk_mq_rq_cpu(struct request *rq) { diff --git a/block/blk-mq.h b/block/blk-mq.h index a9fe01e14951..8be447995106 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -37,6 +37,8 @@ struct blk_mq_ctx { struct kobject kobj; } ____cacheline_aligned_in_smp; +void blk_mq_submit_bio(struct bio *bio); +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); void blk_mq_exit_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); diff --git a/block/fops.c b/block/fops.c index db8f2fe68dd2..ce1255529ba2 100644 --- a/block/fops.c +++ b/block/fops.c @@ -61,7 +61,6 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, bool should_dirty = false; struct bio bio; ssize_t ret; - blk_qc_t qc; if ((pos | iov_iter_alignment(iter)) & (bdev_logical_block_size(bdev) - 1)) @@ -102,13 +101,12 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, if (iocb->ki_flags & IOCB_HIPRI) bio_set_polled(&bio, iocb); - qc = submit_bio(&bio); + submit_bio(&bio); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(bio.bi_private)) break; - if (!(iocb->ki_flags & IOCB_HIPRI) || - !blk_poll(bdev_get_queue(bdev), qc, 0)) + if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -141,14 +139,6 @@ struct blkdev_dio { static struct bio_set blkdev_dio_pool; -static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags) -{ - struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); - struct request_queue *q = bdev_get_queue(bdev); - - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); -} - static void blkdev_bio_end_io(struct bio *bio) { struct blkdev_dio *dio = bio->bi_private; @@ -162,6 +152,8 @@ static void blkdev_bio_end_io(struct bio *bio) struct kiocb *iocb = dio->iocb; ssize_t ret; + WRITE_ONCE(iocb->private, NULL); + if (likely(!dio->bio.bi_status)) { ret = dio->size; iocb->ki_pos += ret; @@ -200,7 +192,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bool do_poll = (iocb->ki_flags & IOCB_HIPRI); bool is_read = (iov_iter_rw(iter) == READ), is_sync; loff_t pos = iocb->ki_pos; - blk_qc_t qc = BLK_QC_T_NONE; int ret = 0; if ((pos | iov_iter_alignment(iter)) & @@ -262,9 +253,9 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!nr_pages) { if (do_poll) bio_set_polled(bio, iocb); - qc = submit_bio(bio); + submit_bio(bio); if (do_poll) - WRITE_ONCE(iocb->ki_cookie, qc); + WRITE_ONCE(iocb->private, bio); break; } if (!dio->multi_bio) { @@ -297,7 +288,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->waiter)) break; - if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0)) + if (!do_poll || !bio_poll(bio, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -594,7 
+585,7 @@ const struct file_operations def_blk_fops = { .llseek = blkdev_llseek, .read_iter = blkdev_read_iter, .write_iter = blkdev_write_iter, - .iopoll = blkdev_iopoll, + .iopoll = iocb_bio_iopoll, .mmap = generic_file_mmap, .fsync = blkdev_fsync, .unlocked_ioctl = blkdev_ioctl, diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 530b31240203..aa0472718dce 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -282,7 +282,7 @@ out: return err; } -static blk_qc_t brd_submit_bio(struct bio *bio) +static void brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_bdev->bd_disk->private_data; sector_t sector = bio->bi_iter.bi_sector; @@ -299,16 +299,14 @@ static blk_qc_t brd_submit_bio(struct bio *bio) err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, bio_op(bio), sector); - if (err) - goto io_error; + if (err) { + bio_io_error(bio); + return; + } sector += len >> SECTOR_SHIFT; } bio_endio(bio); - return BLK_QC_T_NONE; -io_error: - bio_io_error(bio); - return BLK_QC_T_NONE; } static int brd_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 5d9181382ce1..6674a0b88341 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1448,7 +1448,7 @@ extern void conn_free_crypto(struct drbd_connection *connection); /* drbd_req */ extern void do_submit(struct work_struct *ws); extern void __drbd_make_request(struct drbd_device *, struct bio *); -extern blk_qc_t drbd_submit_bio(struct bio *bio); +void drbd_submit_bio(struct bio *bio); extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); extern int is_valid_ar_handle(struct drbd_request *, sector_t); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 5ca233644d70..3235532ae077 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1596,7 +1596,7 @@ void do_submit(struct work_struct *ws) } } -blk_qc_t drbd_submit_bio(struct bio *bio) +void drbd_submit_bio(struct bio *bio) { struct drbd_device *device = bio->bi_bdev->bd_disk->private_data; @@ -1609,7 +1609,6 @@ blk_qc_t drbd_submit_bio(struct bio *bio) inc_ap_bio(device); __drbd_make_request(device, bio); - return BLK_QC_T_NONE; } static bool net_timeout_reached(struct drbd_request *net_req, diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index 26798da661bd..b168ca25b6c9 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -84,7 +84,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos) return true; } -static blk_qc_t n64cart_submit_bio(struct bio *bio) +static void n64cart_submit_bio(struct bio *bio) { struct bio_vec bvec; struct bvec_iter iter; @@ -92,16 +92,14 @@ static blk_qc_t n64cart_submit_bio(struct bio *bio) u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT; bio_for_each_segment(bvec, bio, iter) { - if (!n64cart_do_bvec(dev, &bvec, pos)) - goto io_error; + if (!n64cart_do_bvec(dev, &bvec, pos)) { + bio_io_error(bio); + return; + } pos += bvec.bv_len; } bio_endio(bio); - return BLK_QC_T_NONE; -io_error: - bio_io_error(bio); - return BLK_QC_T_NONE; } static const struct block_device_operations n64cart_fops = { diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 187d779c8ca0..e5cbcf582233 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1422,7 +1422,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb) return &nullb->queues[index]; } 
-static blk_qc_t null_submit_bio(struct bio *bio) +static void null_submit_bio(struct bio *bio) { sector_t sector = bio->bi_iter.bi_sector; sector_t nr_sectors = bio_sectors(bio); @@ -1434,7 +1434,6 @@ static blk_qc_t null_submit_bio(struct bio *bio) cmd->bio = bio; null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio)); - return BLK_QC_T_NONE; } static bool should_timeout_request(struct request *rq) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 0f26b2510a75..cb52cce6fb03 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2400,7 +2400,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) } } -static blk_qc_t pkt_submit_bio(struct bio *bio) +static void pkt_submit_bio(struct bio *bio) { struct pktcdvd_device *pd; char b[BDEVNAME_SIZE]; @@ -2423,7 +2423,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio) */ if (bio_data_dir(bio) == READ) { pkt_make_request_read(pd, bio); - return BLK_QC_T_NONE; + return; } if (!test_bit(PACKET_WRITABLE, &pd->flags)) { @@ -2455,10 +2455,9 @@ static blk_qc_t pkt_submit_bio(struct bio *bio) pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split); } while (split != bio); - return BLK_QC_T_NONE; + return; end_io: bio_io_error(bio); - return BLK_QC_T_NONE; } static void pkt_init_queue(struct pktcdvd_device *pd) diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index c7b19e128b03..d1ebf193cb9a 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -578,7 +578,7 @@ out: return next; } -static blk_qc_t ps3vram_submit_bio(struct bio *bio) +static void ps3vram_submit_bio(struct bio *bio) { struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data; struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); @@ -594,13 +594,11 @@ static blk_qc_t ps3vram_submit_bio(struct bio *bio) spin_unlock_irq(&priv->lock); if (busy) - return BLK_QC_T_NONE; + return; do { bio = ps3vram_do_bio(dev, bio); } while (bio); - - return BLK_QC_T_NONE; } static const struct block_device_operations ps3vram_fops = { diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 1cc40b0ea761..268252380e88 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -50,7 +50,7 @@ struct rsxx_bio_meta { static struct kmem_cache *bio_meta_pool; -static blk_qc_t rsxx_submit_bio(struct bio *bio); +static void rsxx_submit_bio(struct bio *bio); /*----------------- Block Device Operations -----------------*/ static int rsxx_blkdev_ioctl(struct block_device *bdev, @@ -120,7 +120,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card, } } -static blk_qc_t rsxx_submit_bio(struct bio *bio) +static void rsxx_submit_bio(struct bio *bio) { struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data; struct rsxx_bio_meta *bio_meta; @@ -169,7 +169,7 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio) if (st) goto queue_err; - return BLK_QC_T_NONE; + return; queue_err: kmem_cache_free(bio_meta_pool, bio_meta); @@ -177,7 +177,6 @@ req_err: if (st) bio->bi_status = st; bio_endio(bio); - return BLK_QC_T_NONE; } /*----------------- Device Setup -------------------*/ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index fcaf2750f68f..a68297fb51a2 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1598,22 +1598,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) /* * Handler function for all zram I/O requests. 
*/ -static blk_qc_t zram_submit_bio(struct bio *bio) +static void zram_submit_bio(struct bio *bio) { struct zram *zram = bio->bi_bdev->bd_disk->private_data; if (!valid_io_request(zram, bio->bi_iter.bi_sector, bio->bi_iter.bi_size)) { atomic64_inc(&zram->stats.invalid_io); - goto error; + bio_io_error(bio); + return; } __zram_make_request(zram, bio); - return BLK_QC_T_NONE; - -error: - bio_io_error(bio); - return BLK_QC_T_NONE; } static void zram_slot_free_notify(struct block_device *bdev, diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 6d1de889baeb..23b28edae90f 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1163,7 +1163,7 @@ static void quit_max_writeback_rate(struct cache_set *c, /* Cached devices - read & write stuff */ -blk_qc_t cached_dev_submit_bio(struct bio *bio) +void cached_dev_submit_bio(struct bio *bio) { struct search *s; struct block_device *orig_bdev = bio->bi_bdev; @@ -1176,7 +1176,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio) dc->io_disable)) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); - return BLK_QC_T_NONE; + return; } if (likely(d->c)) { @@ -1222,8 +1222,6 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio) } else /* I/O request sent to backing device */ detached_dev_do_request(d, bio, orig_bdev, start_time); - - return BLK_QC_T_NONE; } static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, @@ -1273,7 +1271,7 @@ static void flash_dev_nodata(struct closure *cl) continue_at(cl, search_free, NULL); } -blk_qc_t flash_dev_submit_bio(struct bio *bio) +void flash_dev_submit_bio(struct bio *bio) { struct search *s; struct closure *cl; @@ -1282,7 +1280,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); - return BLK_QC_T_NONE; + return; } s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio)); @@ -1298,7 +1296,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) continue_at_nobarrier(&s->cl, flash_dev_nodata, bcache_wq); - return BLK_QC_T_NONE; + return; } else if (bio_data_dir(bio)) { bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &KEY(d->id, bio->bi_iter.bi_sector, 0), @@ -1314,7 +1312,6 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) } continue_at(cl, search_free, NULL); - return BLK_QC_T_NONE; } static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode, diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 82b38366a95d..38ab4856eaab 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c); void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); -blk_qc_t cached_dev_submit_bio(struct bio *bio); +void cached_dev_submit_bio(struct bio *bio); void bch_flash_dev_request_init(struct bcache_device *d); -blk_qc_t flash_dev_submit_bio(struct bio *bio); +void flash_dev_submit_bio(struct bio *bio); extern struct kmem_cache *bch_search_cache; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 76d9da49fda7..7870e6460633 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1183,14 +1183,13 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) mutex_unlock(&md->swap_bios_lock); } -static blk_qc_t __map_bio(struct dm_target_io *tio) +static void __map_bio(struct dm_target_io *tio) { int r; sector_t sector; struct bio *clone = &tio->clone; struct dm_io *io = tio->io; struct 
dm_target *ti = tio->ti; - blk_qc_t ret = BLK_QC_T_NONE; clone->bi_end_io = clone_endio; @@ -1226,7 +1225,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) case DM_MAPIO_REMAPPED: /* the bio has been remapped so dispatch it */ trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector); - ret = submit_bio_noacct(clone); + submit_bio_noacct(clone); break; case DM_MAPIO_KILL: if (unlikely(swap_bios_limit(ti, clone))) { @@ -1248,8 +1247,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) DMWARN("unimplemented target map return value: %d", r); BUG(); } - - return ret; } static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) @@ -1336,7 +1333,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, } } -static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, +static void __clone_and_map_simple_bio(struct clone_info *ci, struct dm_target_io *tio, unsigned *len) { struct bio *clone = &tio->clone; @@ -1346,8 +1343,7 @@ static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, __bio_clone_fast(clone, ci->bio); if (len) bio_setup_sector(clone, ci->sector, *len); - - return __map_bio(tio); + __map_bio(tio); } static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, @@ -1361,7 +1357,7 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, while ((bio = bio_list_pop(&blist))) { tio = container_of(bio, struct dm_target_io, clone); - (void) __clone_and_map_simple_bio(ci, tio, len); + __clone_and_map_simple_bio(ci, tio, len); } } @@ -1405,7 +1401,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, free_tio(tio); return r; } - (void) __map_bio(tio); + __map_bio(tio); return 0; } @@ -1520,11 +1516,10 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md, /* * Entry point to split a bio into clones and submit them to the targets. 
*/ -static blk_qc_t __split_and_process_bio(struct mapped_device *md, +static void __split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { struct clone_info ci; - blk_qc_t ret = BLK_QC_T_NONE; int error = 0; init_clone_info(&ci, md, map, bio); @@ -1567,19 +1562,17 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, bio_chain(b, bio); trace_block_split(b, bio->bi_iter.bi_sector); - ret = submit_bio_noacct(bio); + submit_bio_noacct(bio); } } /* drop the extra reference count */ dm_io_dec_pending(ci.io, errno_to_blk_status(error)); - return ret; } -static blk_qc_t dm_submit_bio(struct bio *bio) +static void dm_submit_bio(struct bio *bio) { struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; - blk_qc_t ret = BLK_QC_T_NONE; int srcu_idx; struct dm_table *map; @@ -1609,10 +1602,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio) if (is_abnormal_io(bio)) blk_queue_split(&bio); - ret = __split_and_process_bio(md, map, bio); + __split_and_process_bio(md, map, bio); out: dm_put_live_table(md, srcu_idx); - return ret; } /*----------------------------------------------------------------- diff --git a/drivers/md/md.c b/drivers/md/md.c index ec09083ff0ef..22310d5d8d41 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -443,19 +443,19 @@ check_suspended: } EXPORT_SYMBOL(md_handle_request); -static blk_qc_t md_submit_bio(struct bio *bio) +static void md_submit_bio(struct bio *bio) { const int rw = bio_data_dir(bio); struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; if (mddev == NULL || mddev->pers == NULL) { bio_io_error(bio); - return BLK_QC_T_NONE; + return; } if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { bio_io_error(bio); - return BLK_QC_T_NONE; + return; } blk_queue_split(&bio); @@ -464,15 +464,13 @@ static blk_qc_t md_submit_bio(struct bio *bio) if (bio_sectors(bio) != 0) bio->bi_status = BLK_STS_IOERR; bio_endio(bio); - return BLK_QC_T_NONE; + return; } /* bio could be mergeable after passing to underlayer */ bio->bi_opf &= ~REQ_NOMERGE; md_handle_request(mddev, bio); - - return BLK_QC_T_NONE; } /* mddev_suspend makes sure no new requests are submitted diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 088d3dd6f6fa..b6c6866f9259 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -162,7 +162,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk, return err; } -static blk_qc_t nd_blk_submit_bio(struct bio *bio) +static void nd_blk_submit_bio(struct bio *bio) { struct bio_integrity_payload *bip; struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data; @@ -173,7 +173,7 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio) bool do_acct; if (!bio_integrity_prep(bio)) - return BLK_QC_T_NONE; + return; bip = bio_integrity(bio); rw = bio_data_dir(bio); @@ -199,7 +199,6 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio) bio_end_io_acct(bio, start); bio_endio(bio); - return BLK_QC_T_NONE; } static int nsblk_rw_bytes(struct nd_namespace_common *ndns, diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 92dec4952297..4295fa809420 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1440,7 +1440,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, return ret; } -static blk_qc_t btt_submit_bio(struct bio *bio) +static void btt_submit_bio(struct bio *bio) { struct bio_integrity_payload *bip = bio_integrity(bio); struct btt *btt = bio->bi_bdev->bd_disk->private_data; @@ -1451,7 +1451,7 @@ static blk_qc_t 
btt_submit_bio(struct bio *bio) bool do_acct; if (!bio_integrity_prep(bio)) - return BLK_QC_T_NONE; + return; do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue); if (do_acct) @@ -1483,7 +1483,6 @@ static blk_qc_t btt_submit_bio(struct bio *bio) bio_end_io_acct(bio, start); bio_endio(bio); - return BLK_QC_T_NONE; } static int btt_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index ef4950f80832..a67a3ad1d413 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -190,7 +190,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem, return rc; } -static blk_qc_t pmem_submit_bio(struct bio *bio) +static void pmem_submit_bio(struct bio *bio) { int ret = 0; blk_status_t rc = 0; @@ -229,7 +229,6 @@ static blk_qc_t pmem_submit_bio(struct bio *bio) bio->bi_status = errno_to_blk_status(ret); bio_endio(bio); - return BLK_QC_T_NONE; } static int pmem_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index fba06618c6c2..ab78aa5d28c6 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -312,12 +312,11 @@ static bool nvme_available_path(struct nvme_ns_head *head) return false; } -static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) +static void nvme_ns_head_submit_bio(struct bio *bio) { struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; struct device *dev = disk_to_dev(head->disk); struct nvme_ns *ns; - blk_qc_t ret = BLK_QC_T_NONE; int srcu_idx; /* @@ -334,7 +333,7 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) bio->bi_opf |= REQ_NVME_MPATH; trace_block_bio_remap(bio, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); - ret = submit_bio_noacct(bio); + submit_bio_noacct(bio); } else if (nvme_available_path(head)) { dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); @@ -349,7 +348,6 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) } srcu_read_unlock(&head->srcu, srcu_idx); - return ret; } static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode) diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 5be3d1c39a78..59e513d34b0f 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -30,7 +30,7 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode); static void dcssblk_release(struct gendisk *disk, fmode_t mode); -static blk_qc_t dcssblk_submit_bio(struct bio *bio); +static void dcssblk_submit_bio(struct bio *bio); static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); @@ -854,7 +854,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) up_write(&dcssblk_devices_sem); } -static blk_qc_t +static void dcssblk_submit_bio(struct bio *bio) { struct dcssblk_dev_info *dev_info; @@ -907,10 +907,9 @@ dcssblk_submit_bio(struct bio *bio) bytes_done += bvec.bv_len; } bio_endio(bio); - return BLK_QC_T_NONE; + return; fail: bio_io_error(bio); - return BLK_QC_T_NONE; } static long diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4a9077c52444..04090ba0ef73 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8248,7 +8248,7 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio, return dip; } -static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter, +static void btrfs_submit_direct(const struct iomap_iter *iter, struct bio *dio_bio, loff_t file_offset) { struct inode *inode = iter->inode; @@ 
-8278,7 +8278,7 @@ static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter, } dio_bio->bi_status = BLK_STS_RESOURCE; bio_endio(dio_bio); - return BLK_QC_T_NONE; + return; } if (!write) { @@ -8372,15 +8372,13 @@ static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter, free_extent_map(em); } while (submit_len > 0); - return BLK_QC_T_NONE; + return; out_err_em: free_extent_map(em); out_err: dip->dio_bio->bi_status = status; btrfs_dio_private_put(dip); - - return BLK_QC_T_NONE; } const struct iomap_ops btrfs_dio_iomap_ops = { diff --git a/fs/ext4/file.c b/fs/ext4/file.c index ac0e11bbb445..9c5559faacda 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -915,7 +915,7 @@ const struct file_operations ext4_file_operations = { .llseek = ext4_llseek, .read_iter = ext4_file_read_iter, .write_iter = ext4_file_write_iter, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index c559827cb6f9..635f0e3f10ec 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1353,7 +1353,7 @@ const struct file_operations gfs2_file_fops = { .llseek = gfs2_llseek, .read_iter = gfs2_file_read_iter, .write_iter = gfs2_file_write_iter, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, .unlocked_ioctl = gfs2_ioctl, .compat_ioctl = gfs2_compat_ioctl, .mmap = gfs2_mmap, @@ -1386,7 +1386,7 @@ const struct file_operations gfs2_file_fops_nolock = { .llseek = gfs2_llseek, .read_iter = gfs2_file_read_iter, .write_iter = gfs2_file_write_iter, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, .unlocked_ioctl = gfs2_ioctl, .compat_ioctl = gfs2_compat_ioctl, .mmap = gfs2_mmap, diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 236aba256cd1..8efab177011d 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -38,8 +38,7 @@ struct iomap_dio { struct { struct iov_iter *iter; struct task_struct *waiter; - struct request_queue *last_queue; - blk_qc_t cookie; + struct bio *poll_bio; } submit; /* used for aio completion: */ @@ -49,29 +48,20 @@ struct iomap_dio { }; }; -int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags) -{ - struct request_queue *q = READ_ONCE(kiocb->private); - - if (!q) - return 0; - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); -} -EXPORT_SYMBOL_GPL(iomap_dio_iopoll); - static void iomap_dio_submit_bio(const struct iomap_iter *iter, struct iomap_dio *dio, struct bio *bio, loff_t pos) { atomic_inc(&dio->ref); - if (dio->iocb->ki_flags & IOCB_HIPRI) + if (dio->iocb->ki_flags & IOCB_HIPRI) { bio_set_polled(bio, dio->iocb); + dio->submit.poll_bio = bio; + } - dio->submit.last_queue = bdev_get_queue(iter->iomap.bdev); if (dio->dops && dio->dops->submit_io) - dio->submit.cookie = dio->dops->submit_io(iter, bio, pos); + dio->dops->submit_io(iter, bio, pos); else - dio->submit.cookie = submit_bio(bio); + submit_bio(bio); } ssize_t iomap_dio_complete(struct iomap_dio *dio) @@ -164,9 +154,11 @@ static void iomap_dio_bio_end_io(struct bio *bio) } else if (dio->flags & IOMAP_DIO_WRITE) { struct inode *inode = file_inode(dio->iocb->ki_filp); + WRITE_ONCE(dio->iocb->private, NULL); INIT_WORK(&dio->aio.work, iomap_dio_complete_work); queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work); } else { + WRITE_ONCE(dio->iocb->private, NULL); iomap_dio_complete_work(&dio->aio.work); } } @@ -497,8 +489,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, dio->submit.iter = iter; dio->submit.waiter = current; - 
dio->submit.cookie = BLK_QC_T_NONE; - dio->submit.last_queue = NULL; + dio->submit.poll_bio = NULL; if (iov_iter_rw(iter) == READ) { if (iomi.pos >= dio->i_size) @@ -611,8 +602,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (dio->flags & IOMAP_DIO_WRITE_FUA) dio->flags &= ~IOMAP_DIO_NEED_SYNC; - WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie); - WRITE_ONCE(iocb->private, dio->submit.last_queue); + WRITE_ONCE(iocb->private, dio->submit.poll_bio); /* * We are about to drop our additional submission reference, which @@ -639,10 +629,8 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->submit.waiter)) break; - if (!(iocb->ki_flags & IOCB_HIPRI) || - !dio->submit.last_queue || - !blk_poll(dio->submit.last_queue, - dio->submit.cookie, 0)) + if (!dio->submit.poll_bio || + !bio_poll(dio->submit.poll_bio, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 7aa943edfc02..62e7fbe4e54c 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1452,7 +1452,7 @@ const struct file_operations xfs_file_operations = { .write_iter = xfs_file_write_iter, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = xfs_file_compat_ioctl, diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index ddc346a9df9b..3ce5f47338cb 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -1128,7 +1128,7 @@ static const struct file_operations zonefs_file_operations = { .write_iter = zonefs_file_write_iter, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, }; static struct kmem_cache *zonefs_inode_cachep; diff --git a/include/linux/bio.h b/include/linux/bio.h index c7a2d880e927..62d684b7dd4c 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -349,7 +349,7 @@ static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs) return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); } -extern blk_qc_t submit_bio(struct bio *); +void submit_bio(struct bio *bio); extern void bio_endio(struct bio *); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2219e9277118..a9c1d0882550 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -359,9 +359,9 @@ struct blk_mq_hw_ctx { /** @kobj: Kernel object for sysfs. */ struct kobject kobj; - /** @poll_considered: Count times blk_poll() was called. */ + /** @poll_considered: Count times blk_mq_poll() was called. */ unsigned long poll_considered; - /** @poll_invoked: Count how many requests blk_poll() polled. */ + /** @poll_invoked: Count how many requests blk_mq_poll() polled. */ unsigned long poll_invoked; /** @poll_success: Count how many polled requests were completed. 
*/ unsigned long poll_success; @@ -815,16 +815,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) for ((i) = 0; (i) < (hctx)->nr_ctx && \ ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) -static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, - struct request *rq) -{ - if (rq->tag != -1) - return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT); - - return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) | - BLK_QC_T_INTERNAL; -} - static inline void blk_mq_cleanup_rq(struct request *rq) { if (rq->q->mq_ops->cleanup_rq) @@ -843,7 +833,6 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, rq->rq_disk = bio->bi_bdev->bd_disk; } -blk_qc_t blk_mq_submit_bio(struct bio *bio); void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, struct lock_class_key *key); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index f8b9fce68834..72736b4c057c 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -208,6 +208,9 @@ static inline void bio_issue_init(struct bio_issue *issue, ((u64)size << BIO_ISSUE_SIZE_SHIFT)); } +typedef unsigned int blk_qc_t; +#define BLK_QC_T_NONE -1U + /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) @@ -227,8 +230,8 @@ struct bio { struct bvec_iter bi_iter; + blk_qc_t bi_cookie; bio_end_io_t *bi_end_io; - void *bi_private; #ifdef CONFIG_BLK_CGROUP /* @@ -384,7 +387,7 @@ enum req_flag_bits { /* command specific flags for REQ_OP_WRITE_ZEROES: */ __REQ_NOUNMAP, /* do not free blocks when zeroing */ - __REQ_POLLED, /* caller polls for completion using blk_poll */ + __REQ_POLLED, /* caller polls for completion using bio_poll */ /* for driver use */ __REQ_DRV, @@ -495,11 +498,6 @@ static inline int op_stat_group(unsigned int op) return op_is_write(op); } -typedef unsigned int blk_qc_t; -#define BLK_QC_T_NONE -1U -#define BLK_QC_T_SHIFT 16 -#define BLK_QC_T_INTERNAL (1U << 31) - struct blk_rq_stat { u64 mean; u64 min; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2b80c98fc373..2a8689e949b4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -25,6 +25,7 @@ struct request; struct sg_io_hdr; struct blkcg_gq; struct blk_flush_queue; +struct kiocb; struct pr_ops; struct rq_qos; struct blk_queue_stats; @@ -550,7 +551,7 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); -blk_qc_t submit_bio_noacct(struct bio *bio); +void submit_bio_noacct(struct bio *bio); extern int blk_lld_busy(struct request_queue *q); extern void blk_queue_split(struct bio **); @@ -568,7 +569,8 @@ blk_status_t errno_to_blk_status(int errno); #define BLK_POLL_ONESHOT (1 << 0) /* do not sleep to wait for the expected completion time */ #define BLK_POLL_NOSLEEP (1 << 1) -int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); +int bio_poll(struct bio *bio, unsigned int flags); +int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { @@ -1176,7 +1178,7 @@ static inline void blk_ksm_unregister(struct request_queue *q) { } struct block_device_operations { - blk_qc_t (*submit_bio) (struct bio *bio); + void (*submit_bio)(struct bio *bio); int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); diff --git 
a/include/linux/fs.h b/include/linux/fs.h index c443cddf414f..f595f4097cb7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -334,11 +334,7 @@ struct kiocb { int ki_flags; u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ - union { - unsigned int ki_cookie; /* for ->iopoll */ - struct wait_page_queue *ki_waitq; /* for async buffered IO */ - }; - + struct wait_page_queue *ki_waitq; /* for async buffered IO */ randomized_struct_fields_end }; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 1e86b65567c2..63f4ea4dac9b 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -313,8 +313,8 @@ int iomap_writepages(struct address_space *mapping, struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); - blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio, - loff_t file_offset); + void (*submit_io)(const struct iomap_iter *iter, struct bio *bio, + loff_t file_offset); }; /* @@ -337,7 +337,6 @@ struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags); ssize_t iomap_dio_complete(struct iomap_dio *dio); -int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags); #ifdef CONFIG_SWAP struct file; diff --git a/mm/page_io.c b/mm/page_io.c index ed2eded74f3a..a68faab5b310 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -358,8 +358,6 @@ int swap_readpage(struct page *page, bool synchronous) struct bio *bio; int ret = 0; struct swap_info_struct *sis = page_swap_info(page); - blk_qc_t qc; - struct gendisk *disk; unsigned long pflags; VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page); @@ -409,8 +407,6 @@ int swap_readpage(struct page *page, bool synchronous) bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_end_io = end_swap_bio_read; bio_add_page(bio, page, thp_size(page), 0); - - disk = bio->bi_bdev->bd_disk; /* * Keep this task valid during swap readpage because the oom killer may * attempt to access it in the page fault retry time check. @@ -422,13 +418,13 @@ int swap_readpage(struct page *page, bool synchronous) } count_vm_event(PSWPIN); bio_get(bio); - qc = submit_bio(bio); + submit_bio(bio); while (synchronous) { set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(bio->bi_private)) break; - if (!blk_poll(disk->queue, qc, 0)) + if (!bio_poll(bio, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); From a614dd2280356df0c79300c49d82b7e0c8b31f24 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:25 +0200 Subject: [PATCH 080/174] block: don't allow writing to the poll queue attribute The poll attribute is a historic artefact from before when we had explicit poll queues that require driver specific configuration. Just print a warning when writing to the attribute. 
Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-16-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index da883efcba33..36f14d658e81 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -433,26 +433,11 @@ static ssize_t queue_poll_show(struct request_queue *q, char *page) static ssize_t queue_poll_store(struct request_queue *q, const char *page, size_t count) { - unsigned long poll_on; - ssize_t ret; - - if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL || - !q->tag_set->map[HCTX_TYPE_POLL].nr_queues) + if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return -EINVAL; - - ret = queue_var_store(&poll_on, page, count); - if (ret < 0) - return ret; - - if (poll_on) { - blk_queue_flag_set(QUEUE_FLAG_POLL, q); - } else { - blk_mq_freeze_queue(q); - blk_queue_flag_clear(QUEUE_FLAG_POLL, q); - blk_mq_unfreeze_queue(q); - } - - return ret; + pr_info_ratelimited("writes to the poll attribute are ignored.\n"); + pr_info_ratelimited("please use driver specific parameters instead.\n"); + return count; } static ssize_t queue_io_timeout_show(struct request_queue *q, char *page) From c712dccc64357b94f93e57882373e1365f0e2a56 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:26 +0200 Subject: [PATCH 081/174] nvme-multipath: enable polled I/O Set the poll queue flag to enable polling, given that the multipath node just dispatches the bios to a lower queue. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-17-hch@lst.de Signed-off-by: Jens Axboe --- drivers/nvme/host/multipath.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index ab78aa5d28c6..11440c86881e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -85,8 +85,13 @@ void nvme_failover_req(struct request *req) } spin_lock_irqsave(&ns->head->requeue_lock, flags); - for (bio = req->bio; bio; bio = bio->bi_next) + for (bio = req->bio; bio; bio = bio->bi_next) { bio_set_dev(bio, ns->head->disk->part0); + if (bio->bi_opf & REQ_POLLED) { + bio->bi_opf &= ~REQ_POLLED; + bio->bi_cookie = BLK_QC_T_NONE; + } + } blk_steal_bios(&ns->head->requeue_list, req); spin_unlock_irqrestore(&ns->head->requeue_lock, flags); @@ -477,6 +482,15 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue); blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); + /* + * This assumes all controllers that refer to a namespace either + * support poll queues or not. That is not a strict guarantee, + * but if the assumption is wrong the effect is only suboptimal + * performance but not correctness problem. 
+ */ + if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL && + ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) + blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); /* set to a default value of 512 until the disk is validated */ blk_queue_logical_block_size(head->disk->queue, 512); From fac7c6d529acf2b5428ad08c1b1127e29e570790 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 13 Oct 2021 09:57:11 +0100 Subject: [PATCH 082/174] block: cache bdev in struct file for raw bdev IO bdev = &BDEV_I(file->f_mapping->host)->bdev Getting struct block_device from a file requires 2 memory dereferences as illustrated above, which takes a toll on performance, so cache it in the otherwise unused file->private_data. That gives a noticeable peak performance improvement. Signed-off-by: Pavel Begunkov Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/8415f9fe12e544b9da89593dfbca8de2b52efe03.1634115360.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/block/fops.c b/block/fops.c index ce1255529ba2..551b71af6d90 100644 --- a/block/fops.c +++ b/block/fops.c @@ -17,7 +17,7 @@ #include #include "blk.h" -static struct inode *bdev_file_inode(struct file *file) +static inline struct inode *bdev_file_inode(struct file *file) { return file->f_mapping->host; } @@ -54,8 +54,7 @@ static void blkdev_bio_end_io_simple(struct bio *bio) static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, unsigned int nr_pages) { - struct file *file = iocb->ki_filp; - struct block_device *bdev = I_BDEV(bdev_file_inode(file)); + struct block_device *bdev = iocb->ki_filp->private_data; struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs; loff_t pos = iocb->ki_pos; bool should_dirty = false; @@ -183,9 +182,7 @@ static void blkdev_bio_end_io(struct bio *bio) static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, unsigned int nr_pages) { - struct file *file = iocb->ki_filp; - struct inode *inode = bdev_file_inode(file); - struct block_device *bdev = I_BDEV(inode); + struct block_device *bdev = iocb->ki_filp->private_data; struct blk_plug plug; struct blkdev_dio *dio; struct bio *bio; @@ -389,8 +386,7 @@ static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence) static int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { - struct inode *bd_inode = bdev_file_inode(filp); - struct block_device *bdev = I_BDEV(bd_inode); + struct block_device *bdev = filp->private_data; int error; error = file_write_and_wait_range(filp, start, end); @@ -432,6 +428,8 @@ static int blkdev_open(struct inode *inode, struct file *filp) bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp); if (IS_ERR(bdev)) return PTR_ERR(bdev); + + filp->private_data = bdev; filp->f_mapping = bdev->bd_inode->i_mapping; filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); return 0; @@ -439,7 +437,7 @@ static int blkdev_open(struct inode *inode, struct file *filp) static int blkdev_close(struct inode *inode, struct file *filp) { - struct block_device *bdev = I_BDEV(bdev_file_inode(filp)); + struct block_device *bdev = filp->private_data; blkdev_put(bdev, filp->f_mode); return 0; @@ -454,14 +452,14 @@ static int blkdev_close(struct inode *inode, struct file *filp) */ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) { - struct file *file = iocb->ki_filp; - struct inode *bd_inode = bdev_file_inode(file); + struct block_device *bdev =
iocb->ki_filp->private_data; + struct inode *bd_inode = bdev->bd_inode; loff_t size = i_size_read(bd_inode); struct blk_plug plug; size_t shorted = 0; ssize_t ret; - if (bdev_read_only(I_BDEV(bd_inode))) + if (bdev_read_only(bdev)) return -EPERM; if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev)) @@ -493,9 +491,8 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct file *file = iocb->ki_filp; - struct inode *bd_inode = bdev_file_inode(file); - loff_t size = i_size_read(bd_inode); + struct block_device *bdev = iocb->ki_filp->private_data; + loff_t size = i_size_read(bdev->bd_inode); loff_t pos = iocb->ki_pos; size_t shorted = 0; ssize_t ret; From 09ce8744253a038eb658c14f9dc3b77fa021fc9f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 Oct 2021 11:17:43 -0600 Subject: [PATCH 083/174] block: use flags instead of bit fields for blkdev_dio This generates a lot better code for me, and bumps performance from 7650K IOPS to 7750K IOPS. Looking at profiles for the run and running perf diff, it confirms that we're now spending a lot less time there: 6.38% -2.80% [kernel.vmlinux] [k] blkdev_direct_IO Taking it from the 2nd most cycle consumer to only the 9th most at 3.35% of the CPU time. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/fops.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/block/fops.c b/block/fops.c index 551b71af6d90..1d4f862950bb 100644 --- a/block/fops.c +++ b/block/fops.c @@ -123,6 +123,12 @@ out: return ret; } +enum { + DIO_MULTI_BIO = 1, + DIO_SHOULD_DIRTY = 2, + DIO_IS_SYNC = 4, +}; + struct blkdev_dio { union { struct kiocb *iocb; @@ -130,9 +136,7 @@ struct blkdev_dio { }; size_t size; atomic_t ref; - bool multi_bio : 1; - bool should_dirty : 1; - bool is_sync : 1; + unsigned int flags; struct bio bio; }; @@ -141,13 +145,13 @@ static struct bio_set blkdev_dio_pool; static void blkdev_bio_end_io(struct bio *bio) { struct blkdev_dio *dio = bio->bi_private; - bool should_dirty = dio->should_dirty; + bool should_dirty = dio->flags & DIO_SHOULD_DIRTY; if (bio->bi_status && !dio->bio.bi_status) dio->bio.bi_status = bio->bi_status; - if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) { - if (!dio->is_sync) { + if (!(dio->flags & DIO_MULTI_BIO) || atomic_dec_and_test(&dio->ref)) { + if (!(dio->flags & DIO_IS_SYNC)) { struct kiocb *iocb = dio->iocb; ssize_t ret; @@ -161,7 +165,7 @@ static void blkdev_bio_end_io(struct bio *bio) } dio->iocb->ki_complete(iocb, ret, 0); - if (dio->multi_bio) + if (dio->flags & DIO_MULTI_BIO) bio_put(&dio->bio); } else { struct task_struct *waiter = dio->waiter; @@ -198,17 +202,19 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool); dio = container_of(bio, struct blkdev_dio, bio); - dio->is_sync = is_sync = is_sync_kiocb(iocb); - if (dio->is_sync) { + is_sync = is_sync_kiocb(iocb); + if (is_sync) { + dio->flags = DIO_IS_SYNC; dio->waiter = current; bio_get(bio); } else { + dio->flags = 0; dio->iocb = iocb; } dio->size = 0; - dio->multi_bio = false; - dio->should_dirty = is_read && iter_is_iovec(iter); + if (is_read && iter_is_iovec(iter)) + dio->flags |= DIO_SHOULD_DIRTY; /* * Don't plug for HIPRI/polled IO, as those should go straight @@ -234,7 +240,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (is_read) { bio->bi_opf =
REQ_OP_READ; - if (dio->should_dirty) + if (dio->flags & DIO_SHOULD_DIRTY) bio_set_pages_dirty(bio); } else { bio->bi_opf = dio_bio_write_op(iocb); @@ -255,7 +261,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, WRITE_ONCE(iocb->private, bio); break; } - if (!dio->multi_bio) { + if (!(dio->flags & DIO_MULTI_BIO)) { /* * AIO needs an extra reference to ensure the dio * structure which is embedded into the first bio */ if (!is_sync) bio_get(bio); - dio->multi_bio = true; + dio->flags |= DIO_MULTI_BIO; atomic_set(&dio->ref, 2); do_poll = false; } else { From abd45c159df5fcb7ac820e2825dac85de7c01c21 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Oct 2021 12:43:41 -0600 Subject: [PATCH 084/174] block: handle fast path of bio splitting inline The fast path is the common case where no splitting is needed. Separate the handling into a check part we can inline, and an out-of-line handling path if we do need to split. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-merge.c | 24 ++++++------------------ block/blk-mq.c | 5 +++-- block/blk.h | 27 ++++++++++++++++++++++++++- 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index f88d7863f997..ec727234ac48 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -324,6 +324,7 @@ split: /** * __blk_queue_split - split a bio and submit the second half + * @q: [in] request_queue new bio is being queued at * @bio: [in, out] bio to be split * @nr_segs: [out] number of segments in the first bio * @@ -334,9 +335,9 @@ split: * of the caller to ensure that q->bio_split is only released after processing * of the split bio has finished. */ -void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) +void __blk_queue_split(struct request_queue *q, struct bio **bio, + unsigned int *nr_segs) { - struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue; struct bio *split = NULL; switch (bio_op(*bio)) { @@ -353,21 +354,6 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) nr_segs); break; default: - /* - * All drivers must accept single-segments bios that are <= - * PAGE_SIZE. This is a quick and dirty check that relies on - * the fact that bi_io_vec[0] is always valid if a bio has data. - * The check might lead to occasional false negatives when bios - * are cloned, but compared to the performance impact of cloned - * bios themselves the loop below doesn't matter anyway.
- */ - if (!q->limits.chunk_sectors && - (*bio)->bi_vcnt == 1 && - ((*bio)->bi_io_vec[0].bv_len + - (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) { - *nr_segs = 1; - break; - } split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); break; } @@ -397,9 +383,11 @@ */ void blk_queue_split(struct bio **bio) { + struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue; unsigned int nr_segs; - __blk_queue_split(bio, &nr_segs); + if (blk_may_split(q, *bio)) + __blk_queue_split(q, bio, &nr_segs); } EXPORT_SYMBOL(blk_queue_split); diff --git a/block/blk-mq.c b/block/blk-mq.c index 0860f622099f..09219080855f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2259,11 +2259,12 @@ void blk_mq_submit_bio(struct bio *bio) struct request *rq; struct blk_plug *plug; struct request *same_queue_rq = NULL; - unsigned int nr_segs; + unsigned int nr_segs = 1; blk_status_t ret; blk_queue_bounce(q, &bio); - __blk_queue_split(&bio, &nr_segs); + if (blk_may_split(q, bio)) + __blk_queue_split(q, &bio, &nr_segs); if (!bio_integrity_prep(bio)) goto queue_exit; diff --git a/block/blk.h b/block/blk.h index fa05d3f07976..447a2defe2c8 100644 --- a/block/blk.h +++ b/block/blk.h @@ -266,7 +266,32 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); ssize_t part_timeout_store(struct device *, struct device_attribute *, const char *, size_t); -void __blk_queue_split(struct bio **bio, unsigned int *nr_segs); +static inline bool blk_may_split(struct request_queue *q, struct bio *bio) +{ + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: + case REQ_OP_WRITE_SAME: + return true; /* non-trivial splitting decisions */ + default: + break; + } + + /* + * All drivers must accept single-segments bios that are <= PAGE_SIZE. + * This is a quick and dirty check that relies on the fact that + * bi_io_vec[0] is always valid if a bio has data. The check might + * lead to occasional false negatives when bios are cloned, but compared + * to the performance impact of cloned bios themselves the loop below + * doesn't matter anyway. + */ + return q->limits.chunk_sectors || bio->bi_vcnt != 1 || + bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE; +} + +void __blk_queue_split(struct request_queue *q, struct bio **bio, + unsigned int *nr_segs); int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs); bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, From 17220ca5ce9606c1b015c4316fca18734c2df0bb Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:26 +0100 Subject: [PATCH 085/174] block: cache request queue in bdev There are tons of places where we need to get a request_queue while having only a bdev, which turns into bdev->bd_disk->queue. There are probably a hundred such places counting inline helpers, and enough of them are in hot paths. Cache the queue pointer in struct block_device and make use of it in bdev_get_queue().
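As a rough sketch of the pointer chase this removes (structures trimmed to the relevant fields for illustration; these are not the real kernel layouts):

	struct request_queue;

	struct gendisk {
		struct request_queue *queue;
	};

	struct block_device {
		struct gendisk *bd_disk;
		struct request_queue *bd_queue;	/* new: cached at bdev_alloc() time */
	};

	/* before: two dependent loads, bdev->bd_disk and then disk->queue */
	static inline struct request_queue *queue_via_disk(struct block_device *bdev)
	{
		return bdev->bd_disk->queue;
	}

	/* after: a single load straight from the block_device */
	static inline struct request_queue *queue_cached(struct block_device *bdev)
	{
		return bdev->bd_queue;
	}

One ordering constraint falls out of this: disk->queue must already be valid when bdev_alloc() copies it, which is why the genhd.c hunk below moves the disk->queue assignment ahead of the part0 allocation in __alloc_disk_node().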
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a3bfaecdd28956f03629d0ca5c63ebc096e1c809.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bdev.c | 1 + block/genhd.c | 4 +++- include/linux/blk_types.h | 1 + include/linux/blkdev.h | 2 +- 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/block/bdev.c b/block/bdev.c index 93b1188d7e58..fed8d0c041c7 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -493,6 +493,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) spin_lock_init(&bdev->bd_size_lock); bdev->bd_partno = partno; bdev->bd_inode = inode; + bdev->bd_queue = disk->queue; bdev->bd_stats = alloc_percpu(struct disk_stats); if (!bdev->bd_stats) { iput(inode); diff --git a/block/genhd.c b/block/genhd.c index ffbdb9b24555..d148c38450d7 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1267,6 +1267,9 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, if (!disk->bdi) goto out_free_disk; + /* bdev_alloc() might need the queue, set before the first call */ + disk->queue = q; + disk->part0 = bdev_alloc(disk, 0); if (!disk->part0) goto out_free_bdi; @@ -1282,7 +1285,6 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, disk_to_dev(disk)->type = &disk_type; device_initialize(disk_to_dev(disk)); inc_diskseq(disk); - disk->queue = q; q->disk = disk; lockdep_init_map(&disk->lockdep_map, "(bio completion)", lkclass, 0); #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 72736b4c057c..1e370929c89e 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -38,6 +38,7 @@ struct block_device { u8 bd_partno; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ struct gendisk * bd_disk; + struct request_queue * bd_queue; /* The counter of freeze processes */ int bd_fsfreeze_count; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2a8689e949b4..d5b21fc8f49e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -574,7 +574,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { - return bdev->bd_disk->queue; /* this is never NULL */ + return bdev->bd_queue; /* this is never NULL */ } /* From 025a38651ba6178a709ecf351ca90d11fa2908cd Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:27 +0100 Subject: [PATCH 086/174] block: use bdev_get_queue() in bdev.c Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster.
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a352936ce5d9ac719645b1e29b173d931ebcdc02.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bdev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/block/bdev.c b/block/bdev.c index fed8d0c041c7..cff0bb3a4578 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -327,12 +327,12 @@ int bdev_read_page(struct block_device *bdev, sector_t sector, if (!ops->rw_page || bdev_get_integrity(bdev)) return result; - result = blk_queue_enter(bdev->bd_disk->queue, 0); + result = blk_queue_enter(bdev_get_queue(bdev), 0); if (result) return result; result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, REQ_OP_READ); - blk_queue_exit(bdev->bd_disk->queue); + blk_queue_exit(bdev_get_queue(bdev)); return result; } @@ -363,7 +363,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, if (!ops->rw_page || bdev_get_integrity(bdev)) return -EOPNOTSUPP; - result = blk_queue_enter(bdev->bd_disk->queue, 0); + result = blk_queue_enter(bdev_get_queue(bdev), 0); if (result) return result; @@ -376,7 +376,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, clean_page_buffers(page); unlock_page(page); } - blk_queue_exit(bdev->bd_disk->queue); + blk_queue_exit(bdev_get_queue(bdev)); return result; } From 3caee4634be68e755d2fb130962f1623661dbd5b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:28 +0100 Subject: [PATCH 087/174] block: use bdev_get_queue() in bio.c Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/85c36ea784d285a5075baa10049e6b59e15fb484.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bio.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/block/bio.c b/block/bio.c index a3c9ff23a036..5fb8092577bf 100644 --- a/block/bio.c +++ b/block/bio.c @@ -965,7 +965,7 @@ EXPORT_SYMBOL(bio_add_pc_page); int bio_add_zone_append_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); bool same_page = false; if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) @@ -1070,7 +1070,7 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct iov_iter i = *iter; iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9); @@ -1148,7 +1148,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) { unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); unsigned int max_append_sectors = queue_max_zone_append_sectors(q); struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; struct page **pages = (struct page **)bv; @@ -1485,10 +1485,10 @@ again: return; if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED)) - rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio); + rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio); if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { - trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio); +
trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); bio_clear_flag(bio, BIO_TRACE_COMPLETION); } From eab4e02733699cdf76fbe5e542d248c28454b3af Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:29 +0100 Subject: [PATCH 088/174] block: use bdev_get_queue() in blk-core.c Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/efc41f880262517c8dc32f932f1b23112f21b255.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-core.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index f008c38ae967..96ee996c0577 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -472,10 +472,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) static inline int bio_queue_enter(struct bio *bio) { - struct gendisk *disk = bio->bi_bdev->bd_disk; - struct request_queue *q = disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); while (!blk_try_enter_queue(q, false)) { + struct gendisk *disk = bio->bi_bdev->bd_disk; + if (bio->bi_opf & REQ_NOWAIT) { if (test_bit(GD_DEAD, &disk->state)) goto dead; @@ -800,7 +801,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q, static noinline_for_stack bool submit_bio_checks(struct bio *bio) { struct block_device *bdev = bio->bi_bdev; - struct request_queue *q = bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bdev); blk_status_t status = BLK_STS_IOERR; struct blk_plug *plug; @@ -962,7 +963,7 @@ static void __submit_bio_noacct(struct bio *bio) current->bio_list = bio_list_on_stack; do { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct bio_list lower, same; /* @@ -980,7 +981,7 @@ static void __submit_bio_noacct(struct bio *bio) bio_list_init(&lower); bio_list_init(&same); while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) - if (q == bio->bi_bdev->bd_disk->queue) + if (q == bdev_get_queue(bio->bi_bdev)) bio_list_add(&same, bio); else bio_list_add(&lower, bio); @@ -1062,7 +1063,7 @@ void submit_bio(struct bio *bio) if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) count = queue_logical_block_size( - bio->bi_bdev->bd_disk->queue) >> 9; + bdev_get_queue(bio->bi_bdev)) >> 9; else count = bio_sectors(bio); From ed6cddefdfd361af23a25bdeaaa5e345ac714c38 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:30 +0100 Subject: [PATCH 089/174] block: convert the rest of block to bdev_get_queue Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster.
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/addf6ea988c04213697ba3684c853e4ed7642a39.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bio-integrity.c | 2 +- block/blk-cgroup.c | 16 ++++++++-------- block/blk-crypto.c | 2 +- block/blk-iocost.c | 12 ++++++------ block/blk-mq.c | 2 +- block/blk-throttle.c | 2 +- block/genhd.c | 4 ++-- block/partitions/core.c | 4 ++-- 8 files changed, 22 insertions(+), 22 deletions(-) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 21234ff966d9..d25114715459 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -134,7 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, iv = bip->bip_vec + bip->bip_vcnt; if (bip->bip_vcnt && - bvec_gap_to_prev(bio->bi_bdev->bd_disk->queue, + bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), &bip->bip_vec[bip->bip_vcnt - 1], offset)) return 0; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 166104d92b5e..8908298d6ad3 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -621,7 +621,7 @@ struct block_device *blkcg_conf_open_bdev(char **inputp) */ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, char *input, struct blkg_conf_ctx *ctx) - __acquires(rcu) __acquires(&bdev->bd_disk->queue->queue_lock) + __acquires(rcu) __acquires(&bdev->bd_queue->queue_lock) { struct block_device *bdev; struct request_queue *q; @@ -632,7 +632,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, if (IS_ERR(bdev)) return PTR_ERR(bdev); - q = bdev->bd_disk->queue; + q = bdev_get_queue(bdev); rcu_read_lock(); spin_lock_irq(&q->queue_lock); @@ -737,9 +737,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep); * with blkg_conf_prep(). */ void blkg_conf_finish(struct blkg_conf_ctx *ctx) - __releases(&ctx->bdev->bd_disk->queue->queue_lock) __releases(rcu) + __releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu) { - spin_unlock_irq(&ctx->bdev->bd_disk->queue->queue_lock); + spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock); rcu_read_unlock(); blkdev_put_no_open(ctx->bdev); } @@ -842,7 +842,7 @@ static void blkcg_fill_root_iostats(void) while ((dev = class_dev_iter_next(&iter))) { struct block_device *bdev = dev_to_bdev(dev); struct blkcg_gq *blkg = - blk_queue_root_blkg(bdev->bd_disk->queue); + blk_queue_root_blkg(bdev_get_queue(bdev)); struct blkg_iostat tmp; int cpu; @@ -1801,7 +1801,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio, rcu_read_lock(); blkg = blkg_lookup_create(css_to_blkcg(css), - bio->bi_bdev->bd_disk->queue); + bdev_get_queue(bio->bi_bdev)); while (blkg) { if (blkg_tryget(blkg)) { ret_blkg = blkg; @@ -1837,8 +1837,8 @@ void bio_associate_blkg_from_css(struct bio *bio, if (css && css->parent) { bio->bi_blkg = blkg_tryget_closest(bio, css); } else { - blkg_get(bio->bi_bdev->bd_disk->queue->root_blkg); - bio->bi_blkg = bio->bi_bdev->bd_disk->queue->root_blkg; + blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg); + bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg; } } EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); diff --git a/block/blk-crypto.c b/block/blk-crypto.c index 103c2e2d50d6..8f53f4a1f9e2 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -280,7 +280,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr) * Success if device supports the encryption context, or if we succeeded * in falling back to the crypto API. 
*/ - if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm, + if (blk_ksm_crypto_cfg_supported(bdev_get_queue(bio->bi_bdev)->ksm, &bc_key->crypto_cfg)) return true; diff --git a/block/blk-iocost.c b/block/blk-iocost.c index b3880e4ba22a..a5b37cc65b17 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3165,12 +3165,12 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input, if (IS_ERR(bdev)) return PTR_ERR(bdev); - ioc = q_to_ioc(bdev->bd_disk->queue); + ioc = q_to_ioc(bdev_get_queue(bdev)); if (!ioc) { - ret = blk_iocost_init(bdev->bd_disk->queue); + ret = blk_iocost_init(bdev_get_queue(bdev)); if (ret) goto err; - ioc = q_to_ioc(bdev->bd_disk->queue); + ioc = q_to_ioc(bdev_get_queue(bdev)); } spin_lock_irq(&ioc->lock); @@ -3332,12 +3332,12 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, if (IS_ERR(bdev)) return PTR_ERR(bdev); - ioc = q_to_ioc(bdev->bd_disk->queue); + ioc = q_to_ioc(bdev_get_queue(bdev)); if (!ioc) { - ret = blk_iocost_init(bdev->bd_disk->queue); + ret = blk_iocost_init(bdev_get_queue(bdev)); if (ret) goto err; - ioc = q_to_ioc(bdev->bd_disk->queue); + ioc = q_to_ioc(bdev_get_queue(bdev)); } spin_lock_irq(&ioc->lock); diff --git a/block/blk-mq.c b/block/blk-mq.c index 09219080855f..b58878221f17 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2253,7 +2253,7 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) */ void blk_mq_submit_bio(struct bio *bio) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); const int is_sync = op_is_sync(bio->bi_opf); const int is_flush_fua = op_is_flush(bio->bi_opf); struct request *rq; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8cefd14deed5..39bb6e68a9a2 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2063,7 +2063,7 @@ void blk_throtl_charge_bio_split(struct bio *bio) bool __blk_throtl_bio(struct bio *bio) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blkcg_gq *blkg = bio->bi_blkg; struct throtl_qnode *qn = NULL; struct throtl_grp *tg = blkg_to_tg(blkg); diff --git a/block/genhd.c b/block/genhd.c index d148c38450d7..759bc06810f8 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -883,7 +883,7 @@ ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct block_device *bdev = dev_to_bdev(dev); - struct request_queue *q = bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bdev); struct disk_stats stat; unsigned int inflight; @@ -927,7 +927,7 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, char *buf) { struct block_device *bdev = dev_to_bdev(dev); - struct request_queue *q = bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bdev); unsigned int inflight[2]; if (queue_is_mq(q)) diff --git a/block/partitions/core.c b/block/partitions/core.c index 3a4898433c43..9dbddc355b40 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -204,7 +204,7 @@ static ssize_t part_alignment_offset_show(struct device *dev, struct block_device *bdev = dev_to_bdev(dev); return sprintf(buf, "%u\n", - queue_limit_alignment_offset(&bdev->bd_disk->queue->limits, + queue_limit_alignment_offset(&bdev_get_queue(bdev)->limits, bdev->bd_start_sect)); } @@ -214,7 +214,7 @@ static ssize_t part_discard_alignment_show(struct device *dev, struct block_device *bdev = dev_to_bdev(dev); return 
sprintf(buf, "%u\n", - queue_limit_discard_alignment(&bdev->bd_disk->queue->limits, + queue_limit_discard_alignment(&bdev_get_queue(bdev)->limits, bdev->bd_start_sect)); } From d4aa57a1cac3c99ffd641f7c8e0a7aff5656de0d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Oct 2021 09:01:43 -0600 Subject: [PATCH 090/174] block: don't bother iter advancing a fully done bio If we're completing nbytes and nbytes is the size of the bio, don't bother with calling into the iterator increment helpers. Just clear the bio size and we're done. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 15 ++------------- include/linux/bio.h | 24 ++++++++++++++++++++++-- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/block/bio.c b/block/bio.c index 5fb8092577bf..4f397ba47db5 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1278,18 +1278,7 @@ int submit_bio_wait(struct bio *bio) } EXPORT_SYMBOL(submit_bio_wait); -/** - * bio_advance - increment/complete a bio by some number of bytes - * @bio: bio to advance - * @bytes: number of bytes to complete - * - * This updates bi_sector, bi_size and bi_idx; if the number of bytes to - * complete doesn't align with a bvec boundary, then bv_len and bv_offset will - * be updated on the last bvec as well. - * - * @bio will then represent the remaining, uncompleted portion of the io. - */ -void bio_advance(struct bio *bio, unsigned bytes) +void __bio_advance(struct bio *bio, unsigned bytes) { if (bio_integrity(bio)) bio_integrity_advance(bio, bytes); @@ -1297,7 +1286,7 @@ void bio_advance(struct bio *bio, unsigned bytes) bio_crypt_advance(bio, bytes); bio_advance_iter(bio, &bio->bi_iter, bytes); } -EXPORT_SYMBOL(bio_advance); +EXPORT_SYMBOL(__bio_advance); void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter) diff --git a/include/linux/bio.h b/include/linux/bio.h index 62d684b7dd4c..9538f20ffaa5 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -119,6 +119,28 @@ static inline void bio_advance_iter_single(const struct bio *bio, bvec_iter_advance_single(bio->bi_io_vec, iter, bytes); } +void __bio_advance(struct bio *, unsigned bytes); + +/** + * bio_advance - increment/complete a bio by some number of bytes + * @bio: bio to advance + * @bytes: number of bytes to complete + * + * This updates bi_sector, bi_size and bi_idx; if the number of bytes to + * complete doesn't align with a bvec boundary, then bv_len and bv_offset will + * be updated on the last bvec as well. + * + * @bio will then represent the remaining, uncompleted portion of the io. 
+ */ +static inline void bio_advance(struct bio *bio, unsigned int nbytes) +{ + if (nbytes == bio->bi_iter.bi_size) { + bio->bi_iter.bi_size = 0; + return; + } + __bio_advance(bio, nbytes); +} + #define __bio_for_each_segment(bvl, bio, iter, start) \ for (iter = (start); \ (iter).bi_size && \ @@ -381,8 +403,6 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs) struct request_queue; extern int submit_bio_wait(struct bio *bio); -extern void bio_advance(struct bio *, unsigned); - extern void bio_init(struct bio *bio, struct bio_vec *table, unsigned short max_vecs); extern void bio_uninit(struct bio *); From c477b7977838ac97dd9d20625591a5d23c8079b7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 Oct 2021 09:15:40 -0600 Subject: [PATCH 091/174] block: remove useless caller argument to print_req_error() We have exactly one caller of this; just get rid of adding the useless function name to the output. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 96ee996c0577..b4094b31c99c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -216,8 +216,7 @@ int blk_status_to_errno(blk_status_t status) } EXPORT_SYMBOL_GPL(blk_status_to_errno); -static void print_req_error(struct request *req, blk_status_t status, - const char *caller) +static void print_req_error(struct request *req, blk_status_t status) { int idx = (__force int)status; @@ -225,9 +224,9 @@ static void print_req_error(struct request *req, blk_status_t status, return; printk_ratelimited(KERN_ERR - "%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x " + "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x " "phys_seg %u prio class %u\n", - caller, blk_errors[idx].name, + blk_errors[idx].name, req->rq_disk ? req->rq_disk->disk_name : "?", blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)), req->cmd_flags & ~REQ_OP_MASK, @@ -1471,7 +1470,7 @@ bool blk_update_request(struct request *req, blk_status_t error, if (unlikely(error && !blk_rq_is_passthrough(req) && !(req->rq_flags & RQF_QUIET))) - print_req_error(req, error, __func__); + print_req_error(req, error); blk_account_io_completion(req, nr_bytes); From 9be3e06fb75abcca00c955af740fabff46a13452 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 Oct 2021 09:17:01 -0600 Subject: [PATCH 092/174] block: move update request helpers into blk-mq.c For some reason we still have them in blk-core, with the rest of the request completion being in blk-mq. That causes an out-of-line call for each completion. Move them into blk-mq.c instead, where they belong.
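The win is inlining: a helper defined in another translation unit is always a real call, while the same helper defined static in the file that uses it can be folded into its caller. A toy illustration of the idea (hypothetical names, not the kernel functions):

	struct stats {
		long completed;
	};

	/* defined in another .c file: every caller pays for a call instruction */
	void account_completion(struct stats *s, int bytes);

	/* defined static in the same .c file as the hot path: the compiler
	 * is free to inline the body into the completion handler */
	static inline void account_completion_local(struct stats *s, int bytes)
	{
		s->completed += bytes;
	}

After the move, req_bio_endio() and blk_account_io_completion() are static to blk-mq.c, and blk_update_request() sits in the same translation unit as the completion code that calls it.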
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 146 +---------------------------------------------- block/blk-mq.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++ block/blk.h | 1 + 3 files changed, 146 insertions(+), 145 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index b4094b31c99c..20b6cc06461a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -216,7 +216,7 @@ int blk_status_to_errno(blk_status_t status) } EXPORT_SYMBOL_GPL(blk_status_to_errno); -static void print_req_error(struct request *req, blk_status_t status) +void blk_print_req_error(struct request *req, blk_status_t status) { int idx = (__force int)status; @@ -234,33 +234,6 @@ static void print_req_error(struct request *req, blk_status_t status) IOPRIO_PRIO_CLASS(req->ioprio)); } -static void req_bio_endio(struct request *rq, struct bio *bio, - unsigned int nbytes, blk_status_t error) -{ - if (error) - bio->bi_status = error; - - if (unlikely(rq->rq_flags & RQF_QUIET)) - bio_set_flag(bio, BIO_QUIET); - - bio_advance(bio, nbytes); - - if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) { - /* - * Partial zone append completions cannot be supported as the - * BIO fragments may end up not being written sequentially. - */ - if (bio->bi_iter.bi_size) - bio->bi_status = BLK_STS_IOERR; - else - bio->bi_iter.bi_sector = rq->__sector; - } - - /* don't actually finish bio if it's part of flush sequence */ - if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) - bio_endio(bio); -} - void blk_dump_rq_flags(struct request *rq, char *msg) { printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg, @@ -1311,17 +1284,6 @@ again: } } -static void blk_account_io_completion(struct request *req, unsigned int bytes) -{ - if (req->part && blk_do_io_stat(req)) { - const int sgrp = op_stat_group(req_op(req)); - - part_stat_lock(); - part_stat_add(req->part, sectors[sgrp], bytes >> 9); - part_stat_unlock(); - } -} - void __blk_account_io_done(struct request *req, u64 now) { const int sgrp = op_stat_group(req_op(req)); @@ -1430,112 +1392,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq) } EXPORT_SYMBOL_GPL(blk_steal_bios); -/** - * blk_update_request - Complete multiple bytes without completing the request - * @req: the request being processed - * @error: block status code - * @nr_bytes: number of bytes to complete for @req - * - * Description: - * Ends I/O on a number of bytes attached to @req, but doesn't complete - * the request structure even if @req doesn't have leftover. - * If @req has leftover, sets it up for the next range of segments. - * - * Passing the result of blk_rq_bytes() as @nr_bytes guarantees - * %false return from this function. - * - * Note: - * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function - * except in the consistency check at the end of this function. 
- * - * Return: - * %false - this request doesn't have any more data - * %true - this request has more data - **/ -bool blk_update_request(struct request *req, blk_status_t error, - unsigned int nr_bytes) -{ - int total_bytes; - - trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); - - if (!req->bio) - return false; - -#ifdef CONFIG_BLK_DEV_INTEGRITY - if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && - error == BLK_STS_OK) - req->q->integrity.profile->complete_fn(req, nr_bytes); -#endif - - if (unlikely(error && !blk_rq_is_passthrough(req) && - !(req->rq_flags & RQF_QUIET))) - print_req_error(req, error); - - blk_account_io_completion(req, nr_bytes); - - total_bytes = 0; - while (req->bio) { - struct bio *bio = req->bio; - unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); - - if (bio_bytes == bio->bi_iter.bi_size) - req->bio = bio->bi_next; - - /* Completion has already been traced */ - bio_clear_flag(bio, BIO_TRACE_COMPLETION); - req_bio_endio(req, bio, bio_bytes, error); - - total_bytes += bio_bytes; - nr_bytes -= bio_bytes; - - if (!nr_bytes) - break; - } - - /* - * completely done - */ - if (!req->bio) { - /* - * Reset counters so that the request stacking driver - * can find how many bytes remain in the request - * later. - */ - req->__data_len = 0; - return false; - } - - req->__data_len -= total_bytes; - - /* update sector only for requests with clear definition of sector */ - if (!blk_rq_is_passthrough(req)) - req->__sector += total_bytes >> 9; - - /* mixed attributes always follow the first bio */ - if (req->rq_flags & RQF_MIXED_MERGE) { - req->cmd_flags &= ~REQ_FAILFAST_MASK; - req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; - } - - if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { - /* - * If total number of sectors is less than the first segment - * size, something has gone terribly wrong. - */ - if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { - blk_dump_rq_flags(req, "request botched"); - req->__data_len = blk_rq_cur_bytes(req); - } - - /* recalculate the number of segments */ - req->nr_phys_segments = blk_recalc_rq_segments(req); - } - - return true; -} -EXPORT_SYMBOL_GPL(blk_update_request); - #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE /** * rq_flush_dcache_pages - Helper function to flush all pages in a request diff --git a/block/blk-mq.c b/block/blk-mq.c index b58878221f17..9cff9e8eada4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -628,6 +628,150 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug) } } +static void req_bio_endio(struct request *rq, struct bio *bio, + unsigned int nbytes, blk_status_t error) +{ + if (error) + bio->bi_status = error; + + if (unlikely(rq->rq_flags & RQF_QUIET)) + bio_set_flag(bio, BIO_QUIET); + + bio_advance(bio, nbytes); + + if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) { + /* + * Partial zone append completions cannot be supported as the + * BIO fragments may end up not being written sequentially. 
+ */ + if (bio->bi_iter.bi_size) + bio->bi_status = BLK_STS_IOERR; + else + bio->bi_iter.bi_sector = rq->__sector; + } + + /* don't actually finish bio if it's part of flush sequence */ + if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) + bio_endio(bio); +} + +static void blk_account_io_completion(struct request *req, unsigned int bytes) +{ + if (req->part && blk_do_io_stat(req)) { + const int sgrp = op_stat_group(req_op(req)); + + part_stat_lock(); + part_stat_add(req->part, sectors[sgrp], bytes >> 9); + part_stat_unlock(); + } +} + +/** + * blk_update_request - Complete multiple bytes without completing the request + * @req: the request being processed + * @error: block status code + * @nr_bytes: number of bytes to complete for @req + * + * Description: + * Ends I/O on a number of bytes attached to @req, but doesn't complete + * the request structure even if @req doesn't have leftover. + * If @req has leftover, sets it up for the next range of segments. + * + * Passing the result of blk_rq_bytes() as @nr_bytes guarantees + * %false return from this function. + * + * Note: + * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function + * except in the consistency check at the end of this function. + * + * Return: + * %false - this request doesn't have any more data + * %true - this request has more data + **/ +bool blk_update_request(struct request *req, blk_status_t error, + unsigned int nr_bytes) +{ + int total_bytes; + + trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); + + if (!req->bio) + return false; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && + error == BLK_STS_OK) + req->q->integrity.profile->complete_fn(req, nr_bytes); +#endif + + if (unlikely(error && !blk_rq_is_passthrough(req) && + !(req->rq_flags & RQF_QUIET))) + blk_print_req_error(req, error); + + blk_account_io_completion(req, nr_bytes); + + total_bytes = 0; + while (req->bio) { + struct bio *bio = req->bio; + unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); + + if (bio_bytes == bio->bi_iter.bi_size) + req->bio = bio->bi_next; + + /* Completion has already been traced */ + bio_clear_flag(bio, BIO_TRACE_COMPLETION); + req_bio_endio(req, bio, bio_bytes, error); + + total_bytes += bio_bytes; + nr_bytes -= bio_bytes; + + if (!nr_bytes) + break; + } + + /* + * completely done + */ + if (!req->bio) { + /* + * Reset counters so that the request stacking driver + * can find how many bytes remain in the request + * later. + */ + req->__data_len = 0; + return false; + } + + req->__data_len -= total_bytes; + + /* update sector only for requests with clear definition of sector */ + if (!blk_rq_is_passthrough(req)) + req->__sector += total_bytes >> 9; + + /* mixed attributes always follow the first bio */ + if (req->rq_flags & RQF_MIXED_MERGE) { + req->cmd_flags &= ~REQ_FAILFAST_MASK; + req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; + } + + if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { + /* + * If total number of sectors is less than the first segment + * size, something has gone terribly wrong. 
+ */ + if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { + blk_dump_rq_flags(req, "request botched"); + req->__data_len = blk_rq_cur_bytes(req); + } + + /* recalculate the number of segments */ + req->nr_phys_segments = blk_recalc_rq_segments(req); + } + + return true; +} +EXPORT_SYMBOL_GPL(blk_update_request); + inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { if (blk_mq_need_time_stamp(rq)) { diff --git a/block/blk.h b/block/blk.h index 447a2defe2c8..e80350327e6d 100644 --- a/block/blk.h +++ b/block/blk.h @@ -215,6 +215,7 @@ static inline void blk_integrity_del(struct gendisk *disk) unsigned long blk_rq_timeout(unsigned long timeout); void blk_add_timer(struct request *req); +void blk_print_req_error(struct request *req, blk_status_t status); bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs, struct request **same_queue_rq); From b60876296847e6cd7f1da4b8b7f0f31399d59aa1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 15 Oct 2021 15:03:52 -0600 Subject: [PATCH 093/174] block: improve layout of struct request It's been a while since this was analyzed, move some members around to better flow with the use case. Initial state up top, and queued state after that. This improves my peak case by about 1.5%, from 7750K to 7900K IOPS. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/blk-mq.h | 90 +++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 44 deletions(-) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a9c1d0882550..8ca9728cc7f2 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -83,6 +83,8 @@ struct request { int tag; int internal_tag; + unsigned int timeout; + /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ sector_t __sector; /* sector cursor */ @@ -95,49 +97,6 @@ struct request { struct request *rq_next; }; - /* - * The hash is used inside the scheduler, and killed once the - * request reaches the dispatch list. The ipi_list is only used - * to queue the request for softirq completion, which is long - * after the request has been unhashed (and even removed from - * the dispatch list). - */ - union { - struct hlist_node hash; /* merge hash */ - struct llist_node ipi_list; - }; - - /* - * The rb_node is only used inside the io scheduler, requests - * are pruned when moved to the dispatch queue. So let the - * completion_data share space with the rb_node. - */ - union { - struct rb_node rb_node; /* sort/lookup */ - struct bio_vec special_vec; - void *completion_data; - int error_count; /* for legacy drivers, don't use */ - }; - - /* - * Three pointers are available for the IO schedulers, if they need - * more they have to dynamically allocate it. Flush requests are - * never put on the IO scheduler. So let the flush fields share - * space with the elevator data. - */ - union { - struct { - struct io_cq *icq; - void *priv[2]; - } elv; - - struct { - unsigned int seq; - struct list_head list; - rq_end_io_fn *saved_end_io; - } flush; - }; - struct gendisk *rq_disk; struct block_device *part; #ifdef CONFIG_BLK_RQ_ALLOC_TIME @@ -180,9 +139,52 @@ struct request { enum mq_rq_state state; refcount_t ref; - unsigned int timeout; unsigned long deadline; + /* + * The hash is used inside the scheduler, and killed once the + * request reaches the dispatch list. 
The ipi_list is only used
+	 * to queue the request for softirq completion, which is long
+	 * after the request has been unhashed (and even removed from
+	 * the dispatch list).
+	 */
+	union {
+		struct hlist_node hash;	/* merge hash */
+		struct llist_node ipi_list;
+	};
+
+	/*
+	 * The rb_node is only used inside the io scheduler, requests
+	 * are pruned when moved to the dispatch queue. So let the
+	 * completion_data share space with the rb_node.
+	 */
+	union {
+		struct rb_node rb_node;	/* sort/lookup */
+		struct bio_vec special_vec;
+		void *completion_data;
+		int error_count; /* for legacy drivers, don't use */
+	};
+
+
+	/*
+	 * Three pointers are available for the IO schedulers, if they need
+	 * more they have to dynamically allocate it. Flush requests are
+	 * never put on the IO scheduler. So let the flush fields share
+	 * space with the elevator data.
+	 */
+	union {
+		struct {
+			struct io_cq		*icq;
+			void			*priv[2];
+		} elv;
+
+		struct {
+			unsigned int		seq;
+			struct list_head	list;
+			rq_end_io_fn		*saved_end_io;
+		} flush;
+	};
+
 	union {
 		struct __call_single_data csd;
 		u64 fifo_time;

From 90b8faa0e8de1b02b619fb33f6c6e1e13e7d1d70 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 15 Oct 2021 20:06:18 -0600
Subject: [PATCH 094/174] block: only mark bio as tracked if it really is tracked

We set BIO_TRACKED unconditionally when rq_qos_throttle() is called, even
though we may not even have an rq_qos handler. Only mark it as TRACKED if
it really is potentially tracked.

This saves considerable time for the case where the bio isn't tracked:

     2.64%     -1.65%  [kernel.vmlinux]  [k] bio_endio

Reviewed-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-rq-qos.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index f000f83e0621..3cfbc8668cba 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -189,9 +189,10 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 	 * BIO_TRACKED lets controllers know that a bio went through the
 	 * normal rq_qos path.
 	 */
-	bio_set_flag(bio, BIO_TRACKED);
-	if (q->rq_qos)
+	if (q->rq_qos) {
+		bio_set_flag(bio, BIO_TRACKED);
 		__rq_qos_throttle(q->rq_qos, bio);
+	}
 }

 static inline void rq_qos_track(struct request_queue *q, struct request *rq,

From 2ff0682da6e09c1e0db63a2d2abcd4efb531c8db Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Fri, 15 Oct 2021 09:44:38 -0600
Subject: [PATCH 095/174] block: store elevator state in request

Add an rq private RQF_ELV flag, which tells the block layer that this
request was initialized on a queue that has an IO scheduler attached. This
allows for faster checking in the fast path, rather than having to
dereference rq->q later on.

Elevator switching does full quiesce of the queue before detaching an IO
scheduler, so it's safe to cache this in the request itself.
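
To illustrate the idea (a sketch only, not taken from the diff below), the
hot path turns a pointer chase into a flag test on a request the completion
path is already touching:

	/* before: dereference rq->q to see if an elevator is attached */
	if (rq->q->elevator)
		...

	/* after: test a request-local flag, set once at init time */
	if (rq->rq_flags & RQF_ELV)
		...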
Signed-off-by: Jens Axboe --- block/blk-mq-sched.h | 27 ++++++++++++++++----------- block/blk-mq.c | 20 +++++++++++--------- include/linux/blk-mq.h | 2 ++ 3 files changed, 29 insertions(+), 20 deletions(-) diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index fe252278ed9a..98836106b25f 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -56,29 +56,34 @@ static inline bool blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, struct bio *bio) { - struct elevator_queue *e = q->elevator; - - if (e && e->type->ops.allow_merge) - return e->type->ops.allow_merge(q, rq, bio); + if (rq->rq_flags & RQF_ELV) { + struct elevator_queue *e = q->elevator; + if (e->type->ops.allow_merge) + return e->type->ops.allow_merge(q, rq, bio); + } return true; } static inline void blk_mq_sched_completed_request(struct request *rq, u64 now) { - struct elevator_queue *e = rq->q->elevator; + if (rq->rq_flags & RQF_ELV) { + struct elevator_queue *e = rq->q->elevator; - if (e && e->type->ops.completed_request) - e->type->ops.completed_request(rq, now); + if (e->type->ops.completed_request) + e->type->ops.completed_request(rq, now); + } } static inline void blk_mq_sched_requeue_request(struct request *rq) { - struct request_queue *q = rq->q; - struct elevator_queue *e = q->elevator; + if (rq->rq_flags & RQF_ELV) { + struct request_queue *q = rq->q; + struct elevator_queue *e = q->elevator; - if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request) - e->type->ops.requeue_request(rq); + if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request) + e->type->ops.requeue_request(rq); + } } static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) diff --git a/block/blk-mq.c b/block/blk-mq.c index 9cff9e8eada4..28eb1f3c6f76 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -306,7 +306,7 @@ void blk_mq_wake_waiters(struct request_queue *q) */ static inline bool blk_mq_need_time_stamp(struct request *rq) { - return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; + return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)); } static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, @@ -316,9 +316,11 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct request *rq = tags->static_rqs[tag]; if (data->q->elevator) { + rq->rq_flags = RQF_ELV; rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; } else { + rq->rq_flags = 0; rq->tag = tag; rq->internal_tag = BLK_MQ_NO_TAG; } @@ -327,7 +329,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->q = data->q; rq->mq_ctx = data->ctx; rq->mq_hctx = data->hctx; - rq->rq_flags = 0; rq->cmd_flags = data->cmd_flags; if (data->flags & BLK_MQ_REQ_PM) rq->rq_flags |= RQF_PM; @@ -363,11 +364,11 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++; refcount_set(&rq->ref, 1); - if (!op_is_flush(data->cmd_flags)) { + if (!op_is_flush(data->cmd_flags) && (rq->rq_flags & RQF_ELV)) { struct elevator_queue *e = data->q->elevator; rq->elv.icq = NULL; - if (e && e->type->ops.prepare_request) { + if (e->type->ops.prepare_request) { if (e->type->icq_cache) blk_mq_sched_assign_ioc(rq); @@ -588,12 +589,13 @@ static void __blk_mq_free_request(struct request *rq) void blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; - struct elevator_queue *e = q->elevator; struct blk_mq_ctx *ctx = rq->mq_ctx; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; - if (rq->rq_flags 
& RQF_ELVPRIV) {
-		if (e && e->type->ops.finish_request)
+	if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) {
+		struct elevator_queue *e = q->elevator;
+
+		if (e->type->ops.finish_request)
 			e->type->ops.finish_request(rq);
 		if (rq->elv.icq) {
 			put_io_context(rq->elv.icq->ioc);
@@ -2254,7 +2256,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		goto insert;
 	}

-	if (q->elevator && !bypass_insert)
+	if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
 		goto insert;

 	budget_token = blk_mq_get_dispatch_budget(q);
@@ -2492,7 +2494,7 @@ void blk_mq_submit_bio(struct bio *bio)
 		}

 		blk_add_rq_to_plug(plug, rq);
-	} else if (q->elevator) {
+	} else if (rq->rq_flags & RQF_ELV) {
 		/* Insert the request at the IO scheduler queue */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	} else if (plug && !blk_queue_nomerges(q)) {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8ca9728cc7f2..95c3bd3a008e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -55,6 +55,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
 /* ->timeout has been called, don't expire again */
 #define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
+/* queue has elevator attached */
+#define RQF_ELV			((__force req_flags_t)(1 << 22))

 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \

From 4f266f2be822eacd70aca2a7a53c4a111be79acb Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 18 Oct 2021 21:37:27 +0100
Subject: [PATCH 096/174] block: skip elevator fields init for non-elv queue

Don't init rq->hash and rq->rb_node in blk_mq_rq_ctx_init() if there is
no elevator. Also, move some other initialisers that imply barriers to
the end, so the compiler is free to rearrange and optimise the rest of
them.

note: this folds in a change from Jens keeping the queue_list
initialisation unconditional, as skipping it might lead to problems
otherwise.
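
As a minimal sketch of why the ordering matters (illustrative, relying on
the usual kernel semantics of WRITE_ONCE() and refcount_set() acting as
compiler barriers):

	/* plain stores: the compiler may merge and reorder these freely */
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;

	/* barrier-implying stores go last, so they don't split the above */
	WRITE_ONCE(rq->deadline, 0);
	refcount_set(&rq->ref, 1);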
Signed-off-by: Pavel Begunkov
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 28eb1f3c6f76..1d2e2fd4043e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -325,6 +325,10 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 		rq->internal_tag = BLK_MQ_NO_TAG;
 	}

+	if (blk_mq_need_time_stamp(rq))
+		rq->start_time_ns = ktime_get_ns();
+	else
+		rq->start_time_ns = 0;
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = data->q;
 	rq->mq_ctx = data->ctx;
@@ -334,41 +338,37 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 		rq->rq_flags |= RQF_PM;
 	if (blk_queue_io_stat(data->q))
 		rq->rq_flags |= RQF_IO_STAT;
-	INIT_LIST_HEAD(&rq->queuelist);
-	INIT_HLIST_NODE(&rq->hash);
-	RB_CLEAR_NODE(&rq->rb_node);
 	rq->rq_disk = NULL;
 	rq->part = NULL;
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
 	rq->alloc_time_ns = alloc_time_ns;
 #endif
-	if (blk_mq_need_time_stamp(rq))
-		rq->start_time_ns = ktime_get_ns();
-	else
-		rq->start_time_ns = 0;
 	rq->io_start_time_ns = 0;
 	rq->stats_sectors = 0;
 	rq->nr_phys_segments = 0;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
 #endif
-	blk_crypto_rq_set_defaults(rq);
-	/* tag was already set */
-	WRITE_ONCE(rq->deadline, 0);
-	rq->timeout = 0;
-
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;

 	data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
+	blk_crypto_rq_set_defaults(rq);
+	INIT_LIST_HEAD(&rq->queuelist);
+	/* tag was already set */
+	WRITE_ONCE(rq->deadline, 0);
 	refcount_set(&rq->ref, 1);

-	if (!op_is_flush(data->cmd_flags) && (rq->rq_flags & RQF_ELV)) {
+	if (rq->rq_flags & RQF_ELV) {
 		struct elevator_queue *e = data->q->elevator;

 		rq->elv.icq = NULL;
-		if (e->type->ops.prepare_request) {
+		INIT_HLIST_NODE(&rq->hash);
+		RB_CLEAR_NODE(&rq->rb_node);
+
+		if (!op_is_flush(data->cmd_flags) &&
+		    e->type->ops.prepare_request) {
 			if (e->type->icq_cache)
 				blk_mq_sched_assign_ioc(rq);

From 605f784e4f5faecf6c78070c6bf446e920104f9f Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Mon, 18 Oct 2021 21:37:28 +0100
Subject: [PATCH 097/174] block: blk_mq_rq_ctx_init cache ctx/q/hctx

We should have enough registers in blk_mq_rq_ctx_init() to store these
in local vars, so we don't keep reloading them.

note: keeping q->elevator may look unnecessary, but it's also used
inside inlined blk_mq_tags_from_data().
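
A sketch of the pattern (illustrative only): because data escapes into
non-inlined helpers, the compiler cannot prove that data->q is unchanged
across calls and reloads it each time; a local variable avoids the reloads:

	struct request_queue *q = data->q;	/* loaded once */

	rq->q = q;
	if (blk_queue_io_stat(q))		/* reuses the register */
		rq->rq_flags |= RQF_IO_STAT;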
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/blk-mq.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 1d2e2fd4043e..fa4de25c3bcb 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -312,10 +312,14 @@ static inline bool blk_mq_need_time_stamp(struct request *rq) static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, unsigned int tag, u64 alloc_time_ns) { + struct blk_mq_ctx *ctx = data->ctx; + struct blk_mq_hw_ctx *hctx = data->hctx; + struct request_queue *q = data->q; + struct elevator_queue *e = q->elevator; struct blk_mq_tags *tags = blk_mq_tags_from_data(data); struct request *rq = tags->static_rqs[tag]; - if (data->q->elevator) { + if (e) { rq->rq_flags = RQF_ELV; rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; @@ -330,13 +334,13 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, else rq->start_time_ns = 0; /* csd/requeue_work/fifo_time is initialized before use */ - rq->q = data->q; - rq->mq_ctx = data->ctx; - rq->mq_hctx = data->hctx; + rq->q = q; + rq->mq_ctx = ctx; + rq->mq_hctx = hctx; rq->cmd_flags = data->cmd_flags; if (data->flags & BLK_MQ_REQ_PM) rq->rq_flags |= RQF_PM; - if (blk_queue_io_stat(data->q)) + if (blk_queue_io_stat(q)) rq->rq_flags |= RQF_IO_STAT; rq->rq_disk = NULL; rq->part = NULL; From 128459062bc994355027e190477c432ec5b5638a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 18 Oct 2021 21:37:29 +0100 Subject: [PATCH 098/174] block: cache rq_flags inside blk_mq_rq_ctx_init() Add a local variable for rq_flags, it helps to compile out some of rq_flags reloads. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/blk-mq.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index fa4de25c3bcb..633d73580712 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -318,17 +318,23 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct elevator_queue *e = q->elevator; struct blk_mq_tags *tags = blk_mq_tags_from_data(data); struct request *rq = tags->static_rqs[tag]; + unsigned int rq_flags = 0; if (e) { - rq->rq_flags = RQF_ELV; + rq_flags = RQF_ELV; rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; } else { - rq->rq_flags = 0; rq->tag = tag; rq->internal_tag = BLK_MQ_NO_TAG; } + if (data->flags & BLK_MQ_REQ_PM) + rq_flags |= RQF_PM; + if (blk_queue_io_stat(q)) + rq_flags |= RQF_IO_STAT; + rq->rq_flags = rq_flags; + if (blk_mq_need_time_stamp(rq)) rq->start_time_ns = ktime_get_ns(); else @@ -338,10 +344,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->mq_ctx = ctx; rq->mq_hctx = hctx; rq->cmd_flags = data->cmd_flags; - if (data->flags & BLK_MQ_REQ_PM) - rq->rq_flags |= RQF_PM; - if (blk_queue_io_stat(q)) - rq->rq_flags |= RQF_IO_STAT; rq->rq_disk = NULL; rq->part = NULL; #ifdef CONFIG_BLK_RQ_ALLOC_TIME From 9a14d6ce4135fa72705a926c894218a0d6988924 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 16 Oct 2021 17:27:20 -0600 Subject: [PATCH 099/174] block: remove debugfs blk_mq_ctx dispatched/merged/completed attributes These were added as part of early days debugging for blk-mq, and they are not really useful anymore. Rather than spend cycles updating them, just get rid of them. As a bonus, this shrinks the per-cpu software queue size from 256b to 192b. That's a whole cacheline less. 
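
The shrink can be double-checked with pahole on a built object, along
these lines (hypothetical invocation; the exact numbers depend on the
kernel config):

	$ pahole -C blk_mq_ctx block/blk-mq.o
	...
	/* size: 192, cachelines: 3, members: ... */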
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 54 ------------------------------------------ block/blk-mq-sched.c | 5 +--- block/blk-mq.c | 3 --- block/blk-mq.h | 7 ------ 4 files changed, 1 insertion(+), 68 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 409a8256d9ff..928a16af9175 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -663,57 +663,6 @@ CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT); CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ); CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL); -static int ctx_dispatched_show(void *data, struct seq_file *m) -{ - struct blk_mq_ctx *ctx = data; - - seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]); - return 0; -} - -static ssize_t ctx_dispatched_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_ctx *ctx = data; - - ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0; - return count; -} - -static int ctx_merged_show(void *data, struct seq_file *m) -{ - struct blk_mq_ctx *ctx = data; - - seq_printf(m, "%lu\n", ctx->rq_merged); - return 0; -} - -static ssize_t ctx_merged_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_ctx *ctx = data; - - ctx->rq_merged = 0; - return count; -} - -static int ctx_completed_show(void *data, struct seq_file *m) -{ - struct blk_mq_ctx *ctx = data; - - seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]); - return 0; -} - -static ssize_t ctx_completed_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_ctx *ctx = data; - - ctx->rq_completed[0] = ctx->rq_completed[1] = 0; - return count; -} - static int blk_mq_debugfs_show(struct seq_file *m, void *v) { const struct blk_mq_debugfs_attr *attr = m->private; @@ -803,9 +752,6 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops}, {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops}, {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops}, - {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write}, - {"merged", 0600, ctx_merged_show, ctx_merged_write}, - {"completed", 0600, ctx_completed_show, ctx_completed_write}, {}, }; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index efc1374b8831..e85b7556b096 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -387,13 +387,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, * potentially merge with. Currently includes a hand-wavy stop * count of 8, to not spend too much time checking for merges. 
 	 */
-	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
-		ctx->rq_merged++;
+	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
 		ret = true;
-	}

 	spin_unlock(&ctx->lock);
-
 	return ret;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 633d73580712..990d214a7658 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -359,7 +359,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;

-	data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
 	blk_crypto_rq_set_defaults(rq);
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* tag was already set */
@@ -595,7 +594,6 @@ static void __blk_mq_free_request(struct request *rq)
 void blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

 	if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) {
@@ -609,7 +607,6 @@ void blk_mq_free_request(struct request *rq)
 		}
 	}

-	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		__blk_mq_dec_active_requests(hctx);

diff --git a/block/blk-mq.h b/block/blk-mq.h
index 8be447995106..1b91a3fdaa01 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -25,13 +25,6 @@ struct blk_mq_ctx {
 	unsigned short		index_hw[HCTX_MAX_TYPES];
 	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

-	/* incremented at dispatch time */
-	unsigned long		rq_dispatched[2];
-	unsigned long		rq_merged;
-
-	/* incremented at completion time */
-	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
-
 	struct request_queue	*queue;
 	struct blk_mq_ctxs      *ctxs;
 	struct kobject		kobj;

From afd7de03c5268f74202c1dd4780a8532a11f4c6b Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Mon, 18 Oct 2021 08:53:19 -0600
Subject: [PATCH 100/174] block: remove some blk_mq_hw_ctx debugfs entries

Just like the blk_mq_ctx counterparts, we've got a bunch of counters
in here that are only for debugfs and are of questionable value. They
are:

- dispatched, index of how many requests were dispatched in one go

- poll_{considered,invoked,success}, which track poll success rates. We're
  confident in the iopoll implementation at this point, don't bother
  tracking these.

As a bonus, this shrinks each hardware queue from 576 bytes to 512 bytes,
dropping a whole cacheline.
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 67 ------------------------------------------ block/blk-mq.c | 16 ---------- include/linux/blk-mq.h | 10 ------- 3 files changed, 93 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 928a16af9175..68ca5d21cda7 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -529,70 +529,6 @@ out: return res; } -static int hctx_io_poll_show(void *data, struct seq_file *m) -{ - struct blk_mq_hw_ctx *hctx = data; - - seq_printf(m, "considered=%lu\n", hctx->poll_considered); - seq_printf(m, "invoked=%lu\n", hctx->poll_invoked); - seq_printf(m, "success=%lu\n", hctx->poll_success); - return 0; -} - -static ssize_t hctx_io_poll_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_hw_ctx *hctx = data; - - hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0; - return count; -} - -static int hctx_dispatched_show(void *data, struct seq_file *m) -{ - struct blk_mq_hw_ctx *hctx = data; - int i; - - seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]); - - for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) { - unsigned int d = 1U << (i - 1); - - seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]); - } - - seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]); - return 0; -} - -static ssize_t hctx_dispatched_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_hw_ctx *hctx = data; - int i; - - for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) - hctx->dispatched[i] = 0; - return count; -} - -static int hctx_queued_show(void *data, struct seq_file *m) -{ - struct blk_mq_hw_ctx *hctx = data; - - seq_printf(m, "%lu\n", hctx->queued); - return 0; -} - -static ssize_t hctx_queued_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_hw_ctx *hctx = data; - - hctx->queued = 0; - return count; -} - static int hctx_run_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; @@ -738,9 +674,6 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = { {"tags_bitmap", 0400, hctx_tags_bitmap_show}, {"sched_tags", 0400, hctx_sched_tags_show}, {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show}, - {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write}, - {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write}, - {"queued", 0600, hctx_queued_show, hctx_queued_write}, {"run", 0600, hctx_run_show, hctx_run_write}, {"active", 0400, hctx_active_show}, {"dispatch_busy", 0400, hctx_dispatch_busy_show}, diff --git a/block/blk-mq.c b/block/blk-mq.c index 990d214a7658..bd241fd7ee49 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -382,7 +382,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, } } - data->hctx->queued++; return rq; } @@ -1301,14 +1300,6 @@ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, return data.rq; } -static inline unsigned int queued_to_index(unsigned int queued) -{ - if (!queued) - return 0; - - return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1); -} - static bool __blk_mq_get_driver_tag(struct request *rq) { struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; @@ -1632,8 +1623,6 @@ out: if (!list_empty(&zone_list)) list_splice_tail_init(&zone_list, list); - hctx->dispatched[queued_to_index(queued)]++; - /* If we didn't flush the entire list, we could have told the driver * there was more coming, but that turned out to be a lie. 
*/ @@ -4200,14 +4189,9 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, long state = get_current_state(); int ret; - hctx->poll_considered++; - do { - hctx->poll_invoked++; - ret = q->mq_ops->poll(hctx); if (ret > 0) { - hctx->poll_success++; __set_current_state(TASK_RUNNING); return ret; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 95c3bd3a008e..9fb8618fb957 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -341,9 +341,6 @@ struct blk_mq_hw_ctx { unsigned long queued; /** @run: Number of dispatched requests. */ unsigned long run; -#define BLK_MQ_MAX_DISPATCH_ORDER 7 - /** @dispatched: Number of dispatch requests by queue. */ - unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; /** @numa_node: NUMA node the storage adapter has been connected to. */ unsigned int numa_node; @@ -363,13 +360,6 @@ struct blk_mq_hw_ctx { /** @kobj: Kernel object for sysfs. */ struct kobject kobj; - /** @poll_considered: Count times blk_mq_poll() was called. */ - unsigned long poll_considered; - /** @poll_invoked: Count how many requests blk_mq_poll() polled. */ - unsigned long poll_invoked; - /** @poll_success: Count how many polled requests were completed. */ - unsigned long poll_success; - #ifdef CONFIG_BLK_DEBUG_FS /** * @debugfs_dir: debugfs directory for this hardware queue. Named From 013a7f95438144f4ab39a1017a0bff2765d2551a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Oct 2021 07:58:52 -0600 Subject: [PATCH 101/174] block: provide helpers for rq_list manipulation Instead of open-coding the list additions, traversal, and removal, provide a basic set of helpers. Suggested-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 19 +++++-------------- include/linux/blkdev.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index bd241fd7ee49..74505b545dd3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -404,17 +404,11 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data, tag = tag_offset + i; tags &= ~(1UL << i); rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); - rq->rq_next = *data->cached_rq; - *data->cached_rq = rq; + rq_list_add(data->cached_rq, rq); } data->nr_tags -= nr; - if (!data->cached_rq) - return NULL; - - rq = *data->cached_rq; - *data->cached_rq = rq->rq_next; - return rq; + return rq_list_pop(data->cached_rq); } static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) @@ -622,11 +616,9 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request); void blk_mq_free_plug_rqs(struct blk_plug *plug) { - while (plug->cached_rq) { - struct request *rq; + struct request *rq; - rq = plug->cached_rq; - plug->cached_rq = rq->rq_next; + while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) { percpu_ref_get(&rq->q->q_usage_counter); blk_mq_free_request(rq); } @@ -2418,8 +2410,7 @@ void blk_mq_submit_bio(struct bio *bio) plug = blk_mq_plug(q, bio); if (plug && plug->cached_rq) { - rq = plug->cached_rq; - plug->cached_rq = rq->rq_next; + rq = rq_list_pop(&plug->cached_rq); INIT_LIST_HEAD(&rq->queuelist); } else { struct blk_mq_alloc_data data = { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d5b21fc8f49e..b0a322172965 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1298,4 +1298,33 @@ int fsync_bdev(struct block_device *bdev); int freeze_bdev(struct block_device *bdev); int thaw_bdev(struct block_device *bdev); +#define rq_list_add(listptr, rq) do { \ + (rq)->rq_next = 
*(listptr);	\
+	*(listptr) = rq;	\
+} while (0)
+
+#define rq_list_pop(listptr)				\
+({							\
+	struct request *__req = NULL;			\
+	if ((listptr) && *(listptr)) {			\
+		__req = *(listptr);			\
+		*(listptr) = __req->rq_next;		\
+	}						\
+	__req;						\
+})
+
+#define rq_list_peek(listptr)				\
+({							\
+	struct request *__req = NULL;			\
+	if ((listptr) && *(listptr))			\
+		__req = *(listptr);			\
+	__req;						\
+})
+
+#define rq_list_for_each(listptr, pos)			\
+	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos)) \
+
+#define rq_list_next(rq)	(rq)->rq_next
+#define rq_list_empty(list)	((list) == (struct request *) NULL)
+
 #endif /* _LINUX_BLKDEV_H */

From 5a72e899ceb465d731c413d57c6c12cdbf88303c Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 12 Oct 2021 09:24:29 -0600
Subject: [PATCH 102/174] block: add a struct io_comp_batch argument to fops->iopoll()

struct io_comp_batch contains a list head and a completion handler,
which will allow completions to more efficiently complete batches of IO.

For now, no functional changes in this patch, we just define the
io_comp_batch structure and add the argument to the file_operations
iopoll handler.

Reviewed-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-core.c              |  9 +++++----
 block/blk-exec.c              |  2 +-
 block/blk-mq.c                |  9 +++++----
 block/blk-mq.h                |  3 ++-
 block/fops.c                  |  4 ++--
 drivers/block/rnbd/rnbd-clt.c |  2 +-
 drivers/nvme/host/pci.c       |  4 ++--
 drivers/nvme/host/rdma.c      |  2 +-
 drivers/nvme/host/tcp.c       |  2 +-
 drivers/scsi/scsi_lib.c       |  2 +-
 fs/io_uring.c                 |  2 +-
 fs/iomap/direct-io.c          |  2 +-
 include/linux/blk-mq.h        |  2 +-
 include/linux/blkdev.h        | 13 +++++++++++--
 include/linux/fs.h            |  4 +++-
 mm/page_io.c                  |  2 +-
 16 files changed, 39 insertions(+), 25 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 20b6cc06461a..d0c2e11411d0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1078,7 +1078,7 @@ EXPORT_SYMBOL(submit_bio);
  * Note: the caller must either be the context that submitted @bio, or
  * be in a RCU critical section to prevent freeing of @bio.
  */
-int bio_poll(struct bio *bio, unsigned int flags)
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 {
 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
@@ -1096,7 +1096,7 @@ int bio_poll(struct bio *bio, unsigned int flags)
 	if (WARN_ON_ONCE(!queue_is_mq(q)))
 		ret = 0;	/* not yet implemented, should not happen */
 	else
-		ret = blk_mq_poll(q, cookie, flags);
+		ret = blk_mq_poll(q, cookie, iob, flags);
 	blk_queue_exit(q);
 	return ret;
 }
@@ -1106,7 +1106,8 @@ EXPORT_SYMBOL_GPL(bio_poll);
  * Helper to implement file_operations.iopoll. Requires the bio to be stored
  * in iocb->private, and cleared before freeing the bio.
*/ -int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags) +int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob, + unsigned int flags) { struct bio *bio; int ret = 0; @@ -1134,7 +1135,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags) rcu_read_lock(); bio = READ_ONCE(kiocb->private); if (bio && bio->bi_bdev) - ret = bio_poll(bio, flags); + ret = bio_poll(bio, iob, flags); rcu_read_unlock(); return ret; diff --git a/block/blk-exec.c b/block/blk-exec.c index 55f0cd34b37b..1b8b47f6e79b 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -77,7 +77,7 @@ static bool blk_rq_is_poll(struct request *rq) static void blk_rq_poll_completion(struct request *rq, struct completion *wait) { do { - bio_poll(rq->bio, 0); + bio_poll(rq->bio, NULL, 0); cond_resched(); } while (!completion_done(wait)); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 74505b545dd3..79c25b64e8b0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4174,14 +4174,14 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc) } static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, - unsigned int flags) + struct io_comp_batch *iob, unsigned int flags) { struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie); long state = get_current_state(); int ret; do { - ret = q->mq_ops->poll(hctx); + ret = q->mq_ops->poll(hctx, iob); if (ret > 0) { __set_current_state(TASK_RUNNING); return ret; @@ -4201,14 +4201,15 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, return 0; } -int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, + unsigned int flags) { if (!(flags & BLK_POLL_NOSLEEP) && q->poll_nsec != BLK_MQ_POLL_CLASSIC) { if (blk_mq_poll_hybrid(q, cookie)) return 1; } - return blk_mq_poll_classic(q, cookie, flags); + return blk_mq_poll_classic(q, cookie, iob, flags); } unsigned int blk_mq_rq_cpu(struct request *rq) diff --git a/block/blk-mq.h b/block/blk-mq.h index 1b91a3fdaa01..ebf67f4d4f2e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -31,7 +31,8 @@ struct blk_mq_ctx { } ____cacheline_aligned_in_smp; void blk_mq_submit_bio(struct bio *bio); -int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, + unsigned int flags); void blk_mq_exit_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); diff --git a/block/fops.c b/block/fops.c index 1d4f862950bb..2c43e493e37c 100644 --- a/block/fops.c +++ b/block/fops.c @@ -105,7 +105,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(bio.bi_private)) break; - if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, 0)) + if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -291,7 +291,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->waiter)) break; - if (!do_poll || !bio_poll(bio, 0)) + if (!do_poll || !bio_poll(bio, NULL, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index bd4a41afbbfc..0ec0191d4196 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1176,7 +1176,7 @@ static 
blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx, return ret; } -static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx) +static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct rnbd_queue *q = hctx->driver_data; struct rnbd_clt_dev *dev = q->dev; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 896328271471..bb0482dfab3c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1092,7 +1092,7 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } -static int nvme_poll(struct blk_mq_hw_ctx *hctx) +static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_queue *nvmeq = hctx->driver_data; bool found; @@ -1274,7 +1274,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) * Did we miss an interrupt? */ if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) - nvme_poll(req->mq_hctx); + nvme_poll(req->mq_hctx, NULL); else nvme_poll_irqdisable(nvmeq); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 40317e1b9183..1624da3702d4 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -2106,7 +2106,7 @@ unmap_qe: return ret; } -static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx) +static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_rdma_queue *queue = hctx->driver_data; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 3c1c29dd3020..9ce3458ee1dd 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2429,7 +2429,7 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set) return 0; } -static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx) +static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 33fd9a01330c..30f7d0b4eb73 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1784,7 +1784,7 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, } -static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx) +static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct Scsi_Host *shost = hctx->driver_data; diff --git a/fs/io_uring.c b/fs/io_uring.c index c5066146b8de..cd77a137f2d8 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2483,7 +2483,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (!list_empty(&done)) break; - ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags); + ret = kiocb->ki_filp->f_op->iopoll(kiocb, NULL, poll_flags); if (unlikely(ret < 0)) return ret; else if (ret) diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 8efab177011d..83ecfba53abe 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -630,7 +630,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, break; if (!dio->submit.poll_bio || - !bio_poll(dio->submit.poll_bio, 0)) + !bio_poll(dio->submit.poll_bio, NULL, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 9fb8618fb957..4c79439af2f2 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -532,7 +532,7 @@ struct blk_mq_ops { /** * @poll: Called to poll for completion of a specific tag. 
*/ - int (*poll)(struct blk_mq_hw_ctx *); + int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); /** * @complete: Mark the request as complete. diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b0a322172965..fd9771a1da09 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -569,8 +569,9 @@ blk_status_t errno_to_blk_status(int errno); #define BLK_POLL_ONESHOT (1 << 0) /* do not sleep to wait for the expected completion time */ #define BLK_POLL_NOSLEEP (1 << 1) -int bio_poll(struct bio *bio, unsigned int flags); -int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags); +int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags); +int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob, + unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { @@ -1298,6 +1299,14 @@ int fsync_bdev(struct block_device *bdev); int freeze_bdev(struct block_device *bdev); int thaw_bdev(struct block_device *bdev); +struct io_comp_batch { + struct request *req_list; + bool need_ts; + void (*complete)(struct io_comp_batch *); +}; + +#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { } + #define rq_list_add(listptr, rq) do { \ (rq)->rq_next = *(listptr); \ *(listptr) = rq; \ diff --git a/include/linux/fs.h b/include/linux/fs.h index f595f4097cb7..31029a91f440 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -48,6 +48,7 @@ struct backing_dev_info; struct bdi_writeback; struct bio; +struct io_comp_batch; struct export_operations; struct fiemap_extent_info; struct hd_geometry; @@ -2071,7 +2072,8 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); - int (*iopoll)(struct kiocb *kiocb, unsigned int flags); + int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *, + unsigned int flags); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); diff --git a/mm/page_io.c b/mm/page_io.c index a68faab5b310..6010fb07f231 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -424,7 +424,7 @@ int swap_readpage(struct page *page, bool synchronous) if (!READ_ONCE(bio->bi_private)) break; - if (!bio_poll(bio, 0)) + if (!bio_poll(bio, NULL, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); From 1aec5e4a2962f7e0b3fb3e7308dd726be2472c26 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 8 Oct 2021 05:44:23 -0600 Subject: [PATCH 103/174] sbitmap: add helper to clear a batch of tags sbitmap currently only supports clearing tags one-by-one, add a helper that allows the caller to pass in an array of tags to clear. Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 11 +++++++++++ lib/sbitmap.c | 44 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 52 insertions(+), 3 deletions(-) diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index e30b56023ead..4a6ff274335a 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -528,6 +528,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu); +/** + * sbitmap_queue_clear_batch() - Free a batch of allocated bits + * &struct sbitmap_queue. + * @sbq: Bitmap to free from. 
+ * @offset: offset for each tag in array + * @tags: array of tags + * @nr_tags: number of tags in array + */ +void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset, + int *tags, int nr_tags); + static inline int sbq_index_inc(int index) { return (index + 1) & (SBQ_WAIT_QUEUES - 1); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index f398e0ae548e..c6e2f1f2c4d2 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -628,6 +628,46 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq) } EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); +static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag) +{ + if (likely(!sb->round_robin && tag < sb->depth)) + *per_cpu_ptr(sb->alloc_hint, cpu) = tag; +} + +void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset, + int *tags, int nr_tags) +{ + struct sbitmap *sb = &sbq->sb; + unsigned long *addr = NULL; + unsigned long mask = 0; + int i; + + smp_mb__before_atomic(); + for (i = 0; i < nr_tags; i++) { + const int tag = tags[i] - offset; + unsigned long *this_addr; + + /* since we're clearing a batch, skip the deferred map */ + this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word; + if (!addr) { + addr = this_addr; + } else if (addr != this_addr) { + atomic_long_andnot(mask, (atomic_long_t *) addr); + mask = 0; + addr = this_addr; + } + mask |= (1UL << SB_NR_TO_BIT(sb, tag)); + } + + if (mask) + atomic_long_andnot(mask, (atomic_long_t *) addr); + + smp_mb__after_atomic(); + sbitmap_queue_wake_up(sbq); + sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(), + tags[nr_tags - 1] - offset); +} + void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu) { @@ -652,9 +692,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, */ smp_mb__after_atomic(); sbitmap_queue_wake_up(sbq); - - if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth)) - *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr; + sbitmap_update_cpu_hint(&sbq->sb, cpu, nr); } EXPORT_SYMBOL_GPL(sbitmap_queue_clear); From f794f3351f2672d782b8df0fa59f3cef38cffa59 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 8 Oct 2021 05:50:46 -0600 Subject: [PATCH 104/174] block: add support for blk_mq_end_request_batch() Instead of calling blk_mq_end_request() on a single request, add a helper that takes the new struct io_comp_batch and completes any request stored in there. 
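
For a driver, usage ends up looking roughly like the sketch below;
driver_next_completed(), rq_error(), rq_status() and
driver_complete_batch() are placeholder names, not part of this patch:

	DEFINE_IO_COMP_BATCH(iob);
	struct request *rq;

	while ((rq = driver_next_completed(hctx)) != NULL) {
		/* clean completions are deferred into the batch ... */
		if (!blk_mq_add_to_batch(rq, &iob, rq_error(rq),
					 driver_complete_batch))
			/* ... errors etc fall back to one-by-one */
			blk_mq_end_request(rq, rq_status(rq));
	}
	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);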
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 6 +++ block/blk-mq-tag.h | 1 + block/blk-mq.c | 84 ++++++++++++++++++++++++++++++++---------- include/linux/blk-mq.h | 29 +++++++++++++++ 4 files changed, 100 insertions(+), 20 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index c43b97201161..b94c3e8ef392 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -207,6 +207,12 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, } } +void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags) +{ + sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags, + tag_array, nr_tags); +} + struct bt_iter_data { struct blk_mq_hw_ctx *hctx; busy_iter_fn *fn; diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 71c2f7d8e9b7..78ae2fb8e2a4 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -42,6 +42,7 @@ unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, unsigned int *offset); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); +void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags); extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tags, unsigned int depth, bool can_grow); diff --git a/block/blk-mq.c b/block/blk-mq.c index 79c25b64e8b0..9248edd8a7d3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -300,15 +300,6 @@ void blk_mq_wake_waiters(struct request_queue *q) blk_mq_tag_wakeup_all(hctx->tags, true); } -/* - * Only need start/end time stamping if we have iostat or - * blk stats enabled, or using an IO scheduler. - */ -static inline bool blk_mq_need_time_stamp(struct request *rq) -{ - return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)); -} - static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, unsigned int tag, u64 alloc_time_ns) { @@ -768,19 +759,21 @@ bool blk_update_request(struct request *req, blk_status_t error, } EXPORT_SYMBOL_GPL(blk_update_request); +static inline void __blk_mq_end_request_acct(struct request *rq, u64 now) +{ + if (rq->rq_flags & RQF_STATS) { + blk_mq_poll_stats_start(rq->q); + blk_stat_add(rq, now); + } + + blk_mq_sched_completed_request(rq, now); + blk_account_io_done(rq, now); +} + inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { - if (blk_mq_need_time_stamp(rq)) { - u64 now = ktime_get_ns(); - - if (rq->rq_flags & RQF_STATS) { - blk_mq_poll_stats_start(rq->q); - blk_stat_add(rq, now); - } - - blk_mq_sched_completed_request(rq, now); - blk_account_io_done(rq, now); - } + if (blk_mq_need_time_stamp(rq)) + __blk_mq_end_request_acct(rq, ktime_get_ns()); if (rq->end_io) { rq_qos_done(rq->q, rq); @@ -799,6 +792,57 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) } EXPORT_SYMBOL(blk_mq_end_request); +#define TAG_COMP_BATCH 32 + +static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, + int *tag_array, int nr_tags) +{ + struct request_queue *q = hctx->queue; + + blk_mq_put_tags(hctx->tags, tag_array, nr_tags); + percpu_ref_put_many(&q->q_usage_counter, nr_tags); +} + +void blk_mq_end_request_batch(struct io_comp_batch *iob) +{ + int tags[TAG_COMP_BATCH], nr_tags = 0; + struct blk_mq_hw_ctx *last_hctx = NULL; + struct request *rq; + u64 now = 0; + + if (iob->need_ts) + now = ktime_get_ns(); + + while ((rq = rq_list_pop(&iob->req_list)) != NULL) { + prefetch(rq->bio); + prefetch(rq->rq_next); + + blk_update_request(rq, BLK_STS_OK, blk_rq_bytes(rq)); + if 
(iob->need_ts) + __blk_mq_end_request_acct(rq, now); + + WRITE_ONCE(rq->state, MQ_RQ_IDLE); + if (!refcount_dec_and_test(&rq->ref)) + continue; + + blk_crypto_free_request(rq); + blk_pm_mark_last_busy(rq); + rq_qos_done(rq->q, rq); + + if (nr_tags == TAG_COMP_BATCH || + (last_hctx && last_hctx != rq->mq_hctx)) { + blk_mq_flush_tag_batch(last_hctx, tags, nr_tags); + nr_tags = 0; + } + tags[nr_tags++] = rq->tag; + last_hctx = rq->mq_hctx; + } + + if (nr_tags) + blk_mq_flush_tag_batch(last_hctx, tags, nr_tags); +} +EXPORT_SYMBOL_GPL(blk_mq_end_request_batch); + static void blk_complete_reqs(struct llist_head *list) { struct llist_node *entry = llist_reverse_order(llist_del_all(list)); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 4c79439af2f2..656fe34bdb6c 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -728,6 +728,35 @@ static inline void blk_mq_set_request_complete(struct request *rq) void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, blk_status_t error); void __blk_mq_end_request(struct request *rq, blk_status_t error); +void blk_mq_end_request_batch(struct io_comp_batch *ib); + +/* + * Only need start/end time stamping if we have iostat or + * blk stats enabled, or using an IO scheduler. + */ +static inline bool blk_mq_need_time_stamp(struct request *rq) +{ + return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)); +} + +/* + * Batched completions only work when there is no I/O error and no special + * ->end_io handler. + */ +static inline bool blk_mq_add_to_batch(struct request *req, + struct io_comp_batch *iob, int ioerror, + void (*complete)(struct io_comp_batch *)) +{ + if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror) + return false; + if (!iob->complete) + iob->complete = complete; + else if (iob->complete != complete) + return false; + iob->need_ts |= blk_mq_need_time_stamp(req); + rq_list_add(&iob->req_list, req); + return true; +} void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); From c234a65392062504acf04afe0ae404cca61a8e1a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 8 Oct 2021 05:59:37 -0600 Subject: [PATCH 105/174] nvme: add support for batched completion of polled IO Take advantage of struct io_comp_batch, if passed in to the nvme poll handler. If it's set, rather than complete each request individually inline, store them in the io_comp_batch list. We only do so for requests that will complete successfully, anything else will be completed inline as before. 
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 17 ++++++++++++++--- drivers/nvme/host/nvme.h | 14 ++++++++++++++ drivers/nvme/host/pci.c | 32 +++++++++++++++++++++++--------- 3 files changed, 51 insertions(+), 12 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ae15cb714596..3109bdf137e4 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -346,15 +346,19 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) return RETRY; } -static inline void nvme_end_req(struct request *req) +static inline void nvme_end_req_zoned(struct request *req) { - blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && req_op(req) == REQ_OP_ZONE_APPEND) req->__sector = nvme_lba_to_sect(req->q->queuedata, le64_to_cpu(nvme_req(req)->result.u64)); +} +static inline void nvme_end_req(struct request *req) +{ + blk_status_t status = nvme_error_status(nvme_req(req)->status); + + nvme_end_req_zoned(req); nvme_trace_bio_complete(req); blk_mq_end_request(req, status); } @@ -381,6 +385,13 @@ void nvme_complete_rq(struct request *req) } EXPORT_SYMBOL_GPL(nvme_complete_rq); +void nvme_complete_batch_req(struct request *req) +{ + nvme_cleanup_cmd(req); + nvme_end_req_zoned(req); +} +EXPORT_SYMBOL_GPL(nvme_complete_batch_req); + /* * Called to unwind from ->queue_rq on a failed command submission so that the * multipathing code gets called to potentially failover to another path. diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ed79a6c7e804..ef2467b93adb 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -638,6 +638,20 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id) } void nvme_complete_rq(struct request *req); +void nvme_complete_batch_req(struct request *req); + +static __always_inline void nvme_complete_batch(struct io_comp_batch *iob, + void (*fn)(struct request *rq)) +{ + struct request *req; + + rq_list_for_each(&iob->req_list, req) { + fn(req); + nvme_complete_batch_req(req); + } + blk_mq_end_request_batch(iob); +} + blk_status_t nvme_host_path_error(struct request *req); bool nvme_cancel_request(struct request *req, void *data, bool reserved); void nvme_cancel_tagset(struct nvme_ctrl *ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bb0482dfab3c..83d3503d5b88 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -959,7 +959,7 @@ out_free_cmd: return ret; } -static void nvme_pci_complete_rq(struct request *req) +static __always_inline void nvme_pci_unmap_rq(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_dev *dev = iod->nvmeq->dev; @@ -969,9 +969,19 @@ static void nvme_pci_complete_rq(struct request *req) rq_integrity_vec(req)->bv_len, rq_data_dir(req)); if (blk_rq_nr_phys_segments(req)) nvme_unmap_data(dev, req); +} + +static void nvme_pci_complete_rq(struct request *req) +{ + nvme_pci_unmap_rq(req); nvme_complete_rq(req); } +static void nvme_pci_complete_batch(struct io_comp_batch *iob) +{ + nvme_complete_batch(iob, nvme_pci_unmap_rq); +} + /* We read the CQE phase first to check if the rest of the entry is valid */ static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) { @@ -996,7 +1006,8 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; } -static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) +static inline void 
nvme_handle_cqe(struct nvme_queue *nvmeq, + struct io_comp_batch *iob, u16 idx) { struct nvme_completion *cqe = &nvmeq->cqes[idx]; __u16 command_id = READ_ONCE(cqe->command_id); @@ -1023,7 +1034,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) } trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); - if (!nvme_try_complete_req(req, cqe->status, cqe->result)) + if (!nvme_try_complete_req(req, cqe->status, cqe->result) && + !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + nvme_pci_complete_batch)) nvme_pci_complete_rq(req); } @@ -1039,7 +1052,8 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) } } -static inline int nvme_process_cq(struct nvme_queue *nvmeq) +static inline int nvme_poll_cq(struct nvme_queue *nvmeq, + struct io_comp_batch *iob) { int found = 0; @@ -1050,7 +1064,7 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq) * the cqe requires a full read memory barrier */ dma_rmb(); - nvme_handle_cqe(nvmeq, nvmeq->cq_head); + nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); nvme_update_cq_head(nvmeq); } @@ -1063,7 +1077,7 @@ static irqreturn_t nvme_irq(int irq, void *data) { struct nvme_queue *nvmeq = data; - if (nvme_process_cq(nvmeq)) + if (nvme_poll_cq(nvmeq, NULL)) return IRQ_HANDLED; return IRQ_NONE; } @@ -1088,7 +1102,7 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); - nvme_process_cq(nvmeq); + nvme_poll_cq(nvmeq, NULL); enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } @@ -1101,7 +1115,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) return 0; spin_lock(&nvmeq->cq_poll_lock); - found = nvme_process_cq(nvmeq); + found = nvme_poll_cq(nvmeq, iob); spin_unlock(&nvmeq->cq_poll_lock); return found; @@ -1434,7 +1448,7 @@ static void nvme_reap_pending_cqes(struct nvme_dev *dev) for (i = dev->ctrl.queue_count - 1; i > 0; i--) { spin_lock(&dev->queues[i].cq_poll_lock); - nvme_process_cq(&dev->queues[i]); + nvme_poll_cq(&dev->queues[i], NULL); spin_unlock(&dev->queues[i].cq_poll_lock); } } From b688f11e86c9a22169a0e522530982735d2db19b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 12 Oct 2021 09:28:46 -0600 Subject: [PATCH 106/174] io_uring: utilize the io batching infrastructure for more efficient polled IO Wire up using an io_comp_batch for f_op->iopoll(). If the lower stack supports it, we can handle high rates of polled IO more efficiently. This raises the single core efficiency on my system from ~6.1M IOPS to ~6.6M IOPS running a random read workload at depth 128 on two gen2 Optane drives. 
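
The workload was driven with t/io_uring from the fio repository, along
these lines (a representative invocation, assumed rather than quoted):

	t/io_uring -b512 -d128 -s32 -c32 -p1 -F1 -B1 /dev/nvme1n1 /dev/nvme2n1

where -p1 selects polled IO and -d128 the queue depth.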
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index cd77a137f2d8..d4631a55a692 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2458,6 +2458,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, { struct io_kiocb *req, *tmp; unsigned int poll_flags = BLK_POLL_NOSLEEP; + DEFINE_IO_COMP_BATCH(iob); LIST_HEAD(done); /* @@ -2483,17 +2484,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (!list_empty(&done)) break; - ret = kiocb->ki_filp->f_op->iopoll(kiocb, NULL, poll_flags); + ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags); if (unlikely(ret < 0)) return ret; else if (ret) poll_flags |= BLK_POLL_ONESHOT; /* iopoll may have completed current req */ - if (READ_ONCE(req->iopoll_completed)) + if (!rq_list_empty(iob.req_list) || + READ_ONCE(req->iopoll_completed)) list_move_tail(&req->inflight_entry, &done); } + if (!rq_list_empty(iob.req_list)) + iob.complete(&iob); if (!list_empty(&done)) io_iopoll_complete(ctx, nr_events, &done); From 4f5022453acd0f7b28012e20b7d048470f129894 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Oct 2021 08:45:39 -0600 Subject: [PATCH 107/174] nvme: wire up completion batching for the IRQ path Trivial to do now, just need our own io_comp_batch on the stack and pass that in to the usual command completion handling. I pondered making this dependent on how many entries we had to process, but even for a single entry there's no discernable difference in performance or latency. Running a sync workload over io_uring: t/io_uring -b512 -d1 -s1 -c1 -p0 -F1 -B1 -n2 /dev/nvme1n1 /dev/nvme2n1 yields the below performance before the patch: IOPS=254820, BW=124MiB/s, IOS/call=1/1, inflight=(1 1) IOPS=251174, BW=122MiB/s, IOS/call=1/1, inflight=(1 1) IOPS=250806, BW=122MiB/s, IOS/call=1/1, inflight=(1 1) and the following after: IOPS=255972, BW=124MiB/s, IOS/call=1/1, inflight=(1 1) IOPS=251920, BW=123MiB/s, IOS/call=1/1, inflight=(1 1) IOPS=251794, BW=122MiB/s, IOS/call=1/1, inflight=(1 1) which definitely isn't slower, about the same if you factor in a bit of variance. For peak performance workloads, benchmarking shows a 2% improvement. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 83d3503d5b88..ed684874842f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1076,9 +1076,13 @@ static inline int nvme_poll_cq(struct nvme_queue *nvmeq, static irqreturn_t nvme_irq(int irq, void *data) { struct nvme_queue *nvmeq = data; + DEFINE_IO_COMP_BATCH(iob); - if (nvme_poll_cq(nvmeq, NULL)) + if (nvme_poll_cq(nvmeq, &iob)) { + if (!rq_list_empty(iob.req_list)) + nvme_pci_complete_batch(&iob); return IRQ_HANDLED; + } return IRQ_NONE; } From e0d78afeb8d190164a823d5ef5821b0b3802af33 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Oct 2021 20:54:39 -0600 Subject: [PATCH 108/174] block: fix too broad elevator check in blk_mq_free_request() We added RQF_ELV to tell whether there's an IO scheduler attached, and RQF_ELVPRIV tells us whether there's an IO scheduler with private data attached. Don't check RQF_ELV in blk_mq_free_request(), what we care about here is just if we have scheduler private data attached. 
This fixes a boot crash. Fixes: 2ff0682da6e0 ("block: store elevator state in request") Reported-by: Yi Zhang Reported-by: syzbot+eb8104072aeab6cc1195@syzkaller.appspotmail.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 9248edd8a7d3..bbc61394eef3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -580,7 +580,7 @@ void blk_mq_free_request(struct request *rq) struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; - if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) { + if (rq->rq_flags & RQF_ELVPRIV) { struct elevator_queue *e = q->elevator; if (e->type->ops.finish_request) From db9a02baa23267c695a44234a0f2f4607992780e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Oct 2021 06:15:04 -0600 Subject: [PATCH 109/174] block: move bdev_read_only() into the header This is called for every write in the fast path; move it inline next to get_disk_ro(), which is called internally. Reviewed-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Signed-off-by: Jens Axboe --- block/genhd.c | 6 ------ include/linux/genhd.h | 5 +++++ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/block/genhd.c b/block/genhd.c index 759bc06810f8..80943c123c3e 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1389,12 +1389,6 @@ void set_disk_ro(struct gendisk *disk, bool read_only) } EXPORT_SYMBOL(set_disk_ro); -int bdev_read_only(struct block_device *bdev) -{ - return bdev->bd_read_only || get_disk_ro(bdev->bd_disk); -} -EXPORT_SYMBOL(bdev_read_only); - void inc_diskseq(struct gendisk *disk) { disk->diskseq = atomic64_inc_return(&diskseq); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index cd4038fd5743..c70bc5fce4db 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -221,6 +221,11 @@ static inline int get_disk_ro(struct gendisk *disk) test_bit(GD_READ_ONLY, &disk->state); } +static inline int bdev_read_only(struct block_device *bdev) +{ + return bdev->bd_read_only || get_disk_ro(bdev->bd_disk); +} + extern void disk_block_events(struct gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); extern void disk_flush_events(struct gendisk *disk, unsigned int mask); From 8a7d267b4a2c71a5ff5dd9046abea7117c7d0ac2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 18 Oct 2021 10:45:18 +0200 Subject: [PATCH 110/174] block: don't call blk_status_to_errno in blk_update_request We only need to call it to resolve the blk_status_t -> errno mapping for tracing, so move the conversion into the tracepoints that are not called at all when tracing isn't enabled.
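The same trick generalizes: conversions that only matter for observability can be paid for at trace-record time rather than on every completion. A rough user-space sketch of the shape of the change, with invented names and a plain flag standing in for the tracepoint's enablement check:

#include <stdbool.h>
#include <stdio.h>

static bool trace_enabled;	/* stands in for the tracepoint static key */

static int status_to_errno(int status)
{
	/* stand-in for a blk_status_to_errno()-style table lookup */
	return status ? -5 : 0;
}

/* the "tracepoint": the conversion now happens only when recording */
static void trace_complete(int status, unsigned int bytes)
{
	if (!trace_enabled)
		return;
	printf("complete: errno=%d bytes=%u\n", status_to_errno(status), bytes);
}

static void update_request(int status, unsigned int bytes)
{
	trace_complete(status, bytes);	/* fast path passes the raw status through */
}

int main(void)
{
	update_request(0, 4096);	/* tracing off: no conversion cost */
	trace_enabled = true;
	update_request(1, 0);		/* tracing on: converted at record time */
	return 0;
}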
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- include/trace/events/block.h | 6 +++--- kernel/trace/blktrace.c | 7 ++++--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index bbc61394eef3..59809ec24303 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -680,7 +680,7 @@ bool blk_update_request(struct request *req, blk_status_t error, { int total_bytes; - trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); + trace_block_rq_complete(req, error, nr_bytes); if (!req->bio) return false; diff --git a/include/trace/events/block.h b/include/trace/events/block.h index cc5ab96a7471..a95daa4d4caa 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -114,7 +114,7 @@ TRACE_EVENT(block_rq_requeue, */ TRACE_EVENT(block_rq_complete, - TP_PROTO(struct request *rq, int error, unsigned int nr_bytes), + TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes), TP_ARGS(rq, error, nr_bytes), @@ -122,7 +122,7 @@ TRACE_EVENT(block_rq_complete, __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) - __field( int, error ) + __field( int , error ) __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, 1 ) ), @@ -131,7 +131,7 @@ TRACE_EVENT(block_rq_complete, __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; __entry->sector = blk_rq_pos(rq); __entry->nr_sector = nr_bytes >> 9; - __entry->error = error; + __entry->error = blk_status_to_errno(error); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index fa91f398f28b..1183c88634aa 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -816,7 +816,7 @@ blk_trace_request_get_cgid(struct request *rq) * Records an action against a request. Will log the bio offset + size. * **/ -static void blk_add_trace_rq(struct request *rq, int error, +static void blk_add_trace_rq(struct request *rq, blk_status_t error, unsigned int nr_bytes, u32 what, u64 cgid) { struct blk_trace *bt; @@ -834,7 +834,8 @@ static void blk_add_trace_rq(struct request *rq, int error, what |= BLK_TC_ACT(BLK_TC_FS); __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), - rq->cmd_flags, what, error, 0, NULL, cgid); + rq->cmd_flags, what, blk_status_to_errno(error), 0, + NULL, cgid); rcu_read_unlock(); } @@ -863,7 +864,7 @@ static void blk_add_trace_rq_requeue(void *ignore, struct request *rq) } static void blk_add_trace_rq_complete(void *ignore, struct request *rq, - int error, unsigned int nr_bytes) + blk_status_t error, unsigned int nr_bytes) { blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE, blk_trace_request_get_cgid(rq)); From 87c037d11b83b93e9ab5eda9fb03c114f67024ff Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Oct 2021 10:07:09 -0600 Subject: [PATCH 111/174] block: return whether or not to unplug through boolean Instead of returning the same queue request through a request pointer, use a boolean to accomplish the same. 
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-merge.c | 11 +++++------ block/blk-mq.c | 16 +++++++++------- block/blk.h | 2 +- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index ec727234ac48..c273b58378ce 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -1067,9 +1067,8 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, * @q: request_queue new bio is being queued at * @bio: new bio being queued * @nr_segs: number of segments in @bio - * @same_queue_rq: pointer to &struct request that gets filled in when - * another request associated with @q is found on the plug list - * (optional, may be %NULL) + * @same_queue_rq: output value, will be true if there's an existing request + * from the passed in @q already in the plug list * * Determine whether @bio being queued on @q can be merged with the previous * request on %current's plugged list. Returns %true if merge was successful, @@ -1085,7 +1084,7 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, * Caller must ensure !blk_queue_nomerges(q) beforehand. */ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, - unsigned int nr_segs, struct request **same_queue_rq) + unsigned int nr_segs, bool *same_queue_rq) { struct blk_plug *plug; struct request *rq; @@ -1096,12 +1095,12 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, /* check the previously added entry for a quick merge attempt */ rq = list_last_entry(&plug->mq_list, struct request, queuelist); - if (rq->q == q && same_queue_rq) { + if (rq->q == q) { /* * Only blk-mq multiple hardware queues case checks the rq in * the same queue, there should be only one such rq in a queue */ - *same_queue_rq = rq; + *same_queue_rq = true; } if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == BIO_MERGE_OK) return true; diff --git a/block/blk-mq.c b/block/blk-mq.c index 59809ec24303..335ec3a7eab7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2432,7 +2432,7 @@ void blk_mq_submit_bio(struct bio *bio) const int is_flush_fua = op_is_flush(bio->bi_opf); struct request *rq; struct blk_plug *plug; - struct request *same_queue_rq = NULL; + bool same_queue_rq = false; unsigned int nr_segs = 1; blk_status_t ret; @@ -2525,6 +2525,8 @@ void blk_mq_submit_bio(struct bio *bio) /* Insert the request at the IO scheduler queue */ blk_mq_sched_insert_request(rq, false, true, true); } else if (plug && !blk_queue_nomerges(q)) { + struct request *next_rq = NULL; + /* * We do limited plugging. If the bio can be merged, do that. * Otherwise the existing request in the plug list will be @@ -2532,19 +2534,19 @@ void blk_mq_submit_bio(struct bio *bio) * The plug list might get flushed before this. If that happens, * the plug list is empty, and same_queue_rq is invalid. 
*/ - if (list_empty(&plug->mq_list)) - same_queue_rq = NULL; if (same_queue_rq) { - list_del_init(&same_queue_rq->queuelist); + next_rq = list_last_entry(&plug->mq_list, + struct request, + queuelist); + list_del_init(&next_rq->queuelist); plug->rq_count--; } blk_add_rq_to_plug(plug, rq); trace_block_plug(q); - if (same_queue_rq) { + if (next_rq) { trace_block_unplug(q, 1, true); - blk_mq_try_issue_directly(same_queue_rq->mq_hctx, - same_queue_rq); + blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq); } } else if ((q->nr_hw_queues > 1 && is_sync) || !rq->mq_hctx->dispatch_busy) { diff --git a/block/blk.h b/block/blk.h index e80350327e6d..b9729c12fd62 100644 --- a/block/blk.h +++ b/block/blk.h @@ -218,7 +218,7 @@ void blk_add_timer(struct request *req); void blk_print_req_error(struct request *req, blk_status_t status); bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, - unsigned int nr_segs, struct request **same_queue_rq); + unsigned int nr_segs, bool *same_queue_rq); bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); From df87eb0fce8fc891b43199447b9aeb3ea2d39bcf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Oct 2021 10:08:49 -0600 Subject: [PATCH 112/174] block: get rid of plug list sorting Even if we have multiple queues in the plug list, the chance that they are heavily interspersed is minimal. Don't bother spending CPU cycles sorting the list. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 335ec3a7eab7..104019c0ea41 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -2161,20 +2160,6 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, spin_unlock(&ctx->lock); } -static int plug_rq_cmp(void *priv, const struct list_head *a, - const struct list_head *b) -{ - struct request *rqa = container_of(a, struct request, queuelist); - struct request *rqb = container_of(b, struct request, queuelist); - - if (rqa->mq_ctx != rqb->mq_ctx) - return rqa->mq_ctx > rqb->mq_ctx; - if (rqa->mq_hctx != rqb->mq_hctx) - return rqa->mq_hctx > rqb->mq_hctx; - - return blk_rq_pos(rqa) > blk_rq_pos(rqb); -} - void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) { LIST_HEAD(list); @@ -2182,10 +2167,6 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) if (list_empty(&plug->mq_list)) return; list_splice_init(&plug->mq_list, &list); - - if (plug->rq_count > 2 && plug->multiple_queues) - list_sort(NULL, &list, plug_rq_cmp); - plug->rq_count = 0; do { From e028f167eca5fc56938e6c1680d40eed0bc39e80 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 16 Oct 2021 16:38:14 -0600 Subject: [PATCH 113/174] block: move blk_mq_tag_to_rq() inline This is in the fast path of driver issue or completion, and it's a single array index operation. Move it inline to avoid a function call for it. This does mean making struct blk_mq_tags public to the block layer, but there's not really much in there.
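What makes the lookup worth inlining is visible in the body it moves: a bounds check, an optional prefetch, and a single array index. A stand-alone C sketch of the same shape, with invented names; __builtin_prefetch is the GCC/Clang builtin behind the kernel's prefetch():

#include <stdio.h>

struct tagmap {
	unsigned int nr_tags;
	void **rqs;
};

static inline void *tag_to_rq(struct tagmap *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		__builtin_prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}
	return NULL;
}

int main(void)
{
	int payload = 42;
	void *slots[4] = { NULL, &payload, NULL, NULL };
	struct tagmap tags = { .nr_tags = 4, .rqs = slots };

	/* an in-range tag resolves to its slot, an out-of-range tag to NULL */
	printf("tag 1 -> %p, tag 9 -> %p\n",
	       tag_to_rq(&tags, 1), tag_to_rq(&tags, 9));
	return 0;
}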
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-tag.h | 23 ----------------------- block/blk-mq.c | 11 ----------- include/linux/blk-mq.h | 36 +++++++++++++++++++++++++++++++++++- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 78ae2fb8e2a4..df787b5a23bd 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -4,29 +4,6 @@ struct blk_mq_alloc_data; -/* - * Tag address space map. - */ -struct blk_mq_tags { - unsigned int nr_tags; - unsigned int nr_reserved_tags; - - atomic_t active_queues; - - struct sbitmap_queue bitmap_tags; - struct sbitmap_queue breserved_tags; - - struct request **rqs; - struct request **static_rqs; - struct list_head page_list; - - /* - * used to clear request reference in rqs[] before freeing one - * request pool - */ - spinlock_t lock; -}; - extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy); diff --git a/block/blk-mq.c b/block/blk-mq.c index 104019c0ea41..8f5c1662335b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1120,17 +1120,6 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, } EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); -struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) -{ - if (tag < tags->nr_tags) { - prefetch(tags->rqs[tag]); - return tags->rqs[tag]; - } - - return NULL; -} -EXPORT_SYMBOL(blk_mq_tag_to_rq); - static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 656fe34bdb6c..6cf35de151a9 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -7,6 +7,7 @@ #include #include #include +#include struct blk_mq_tags; struct blk_flush_queue; @@ -675,7 +676,40 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, struct request *blk_mq_alloc_request_hctx(struct request_queue *q, unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx); -struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); + +/* + * Tag address space map. + */ +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + + atomic_t active_queues; + + struct sbitmap_queue bitmap_tags; + struct sbitmap_queue breserved_tags; + + struct request **rqs; + struct request **static_rqs; + struct list_head page_list; + + /* + * used to clear request reference in rqs[] before freeing one + * request pool + */ + spinlock_t lock; +}; + +static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, + unsigned int tag) +{ + if (tag < tags->nr_tags) { + prefetch(tags->rqs[tag]); + return tags->rqs[tag]; + } + + return NULL; +} enum { BLK_MQ_UNIQUE_TAG_BITS = 16, From 6155631a0c3b7ca795a0edb44894e58dc3e3d798 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 15 Oct 2021 16:55:05 -0600 Subject: [PATCH 114/174] block: align blkdev_dio inlined bio to a cacheline We get all sorts of unreliable and funky results since the bio is designed to align on a cacheline, which it does not when inlined like this. 
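The effect of the annotation is easy to see by comparing offsets in a user-space model. The struct below stands in for blkdev_dio with its inlined bio; 64 is an assumed cacheline size, and __attribute__((aligned(64))) plays the role of ____cacheline_aligned_in_smp:

#include <stdio.h>
#include <stddef.h>

struct bio_model { char data[40]; };	/* stand-in for the embedded bio */

struct dio_unaligned {
	int flags;
	struct bio_model bio;	/* lands wherever the preceding fields end */
};

struct dio_aligned {
	int flags;
	struct bio_model bio __attribute__((aligned(64)));
};

int main(void)
{
	/* prints 4 vs 64: only the second starts on a cacheline boundary */
	printf("unaligned bio offset: %zu\n", offsetof(struct dio_unaligned, bio));
	printf("aligned bio offset:   %zu\n", offsetof(struct dio_aligned, bio));
	return 0;
}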
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/fops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/fops.c b/block/fops.c index 2c43e493e37c..21d25ee0e4bf 100644 --- a/block/fops.c +++ b/block/fops.c @@ -137,7 +137,7 @@ struct blkdev_dio { size_t size; atomic_t ref; unsigned int flags; - struct bio bio; + struct bio bio ____cacheline_aligned_in_smp; }; static struct bio_set blkdev_dio_pool; From 480d42dc001bbfe953825a92073012fcd5a99161 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Tue, 19 Oct 2021 11:20:26 +0200 Subject: [PATCH 115/174] blk-wbt: prevent NULL pointer dereference in wb_timer_fn The timer callback used to evaluate if the latency is exceeded can be executed after the corresponding disk has been released, causing the following NULL pointer dereference: [ 119.987108] BUG: kernel NULL pointer dereference, address: 0000000000000098 [ 119.987617] #PF: supervisor read access in kernel mode [ 119.987971] #PF: error_code(0x0000) - not-present page [ 119.988325] PGD 7c4a4067 P4D 7c4a4067 PUD 7bf63067 PMD 0 [ 119.988697] Oops: 0000 [#1] SMP NOPTI [ 119.988959] CPU: 1 PID: 9353 Comm: cloud-init Not tainted 5.15-rc5+arighi #rc5+arighi [ 119.989520] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.14.0-2 04/01/2014 [ 119.990055] RIP: 0010:wb_timer_fn+0x44/0x3c0 [ 119.990376] Code: 41 8b 9c 24 98 00 00 00 41 8b 94 24 b8 00 00 00 41 8b 84 24 d8 00 00 00 4d 8b 74 24 28 01 d3 01 c3 49 8b 44 24 60 48 8b 40 78 <4c> 8b b8 98 00 00 00 4d 85 f6 0f 84 c4 00 00 00 49 83 7c 24 30 00 [ 119.991578] RSP: 0000:ffffb5f580957da8 EFLAGS: 00010246 [ 119.991937] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000004 [ 119.992412] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88f476d7f780 [ 119.992895] RBP: ffffb5f580957dd0 R08: 0000000000000000 R09: 0000000000000000 [ 119.993371] R10: 0000000000000004 R11: 0000000000000002 R12: ffff88f476c84500 [ 119.993847] R13: ffff88f4434390c0 R14: 0000000000000000 R15: ffff88f4bdc98c00 [ 119.994323] FS: 00007fb90bcd9c00(0000) GS:ffff88f4bdc80000(0000) knlGS:0000000000000000 [ 119.994952] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 119.995380] CR2: 0000000000000098 CR3: 000000007c0d6000 CR4: 00000000000006e0 [ 119.995906] Call Trace: [ 119.996130] ? blk_stat_free_callback_rcu+0x30/0x30 [ 119.996505] blk_stat_timer_fn+0x138/0x140 [ 119.996830] call_timer_fn+0x2b/0x100 [ 119.997136] __run_timers.part.0+0x1d1/0x240 [ 119.997470] ? kvm_clock_get_cycles+0x11/0x20 [ 119.997826] ? ktime_get+0x3e/0xa0 [ 119.998110] ? native_apic_msr_write+0x2c/0x30 [ 119.998456] ? lapic_next_event+0x20/0x30 [ 119.998779] ? clockevents_program_event+0x94/0xf0 [ 119.999150] run_timer_softirq+0x2a/0x50 [ 119.999465] __do_softirq+0xcb/0x26f [ 119.999764] irq_exit_rcu+0x8c/0xb0 [ 120.000057] sysvec_apic_timer_interrupt+0x43/0x90 [ 120.000429] ? asm_sysvec_apic_timer_interrupt+0xa/0x20 [ 120.000836] asm_sysvec_apic_timer_interrupt+0x12/0x20 In this case simply return from the timer callback (no action required) to prevent the NULL pointer dereference. 
BugLink: https://bugs.launchpad.net/bugs/1947557 Link: https://lore.kernel.org/linux-mm/YWRNVTk9N8K0RMst@arighi-desktop/ Fixes: 34dbad5d26e2 ("blk-stat: convert to callback-based statistics reporting") Signed-off-by: Andrea Righi Link: https://lore.kernel.org/r/YW6N2qXpBU3oc50q@arighi-desktop Signed-off-by: Jens Axboe --- block/blk-wbt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 874c1c37bf0c..0c119be0e813 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -357,6 +357,9 @@ static void wb_timer_fn(struct blk_stat_callback *cb) unsigned int inflight = wbt_inflight(rwb); int status; + if (!rwb->rqos.q->disk) + return; + status = latency_exceeded(rwb, cb->stat); trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step, From bc490f81731e181b07b8d7577425c06ae91692c8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Oct 2021 10:12:12 -0600 Subject: [PATCH 116/174] block: change plugging to use a singly linked list Use a singly linked list for the blk_plug. This saves 8 bytes in the blk_plug struct, and makes for faster list manipulations than doubly linked lists. As we don't use the doubly linked lists for anything, singly linked is just fine. This yields a bump in default (merging enabled) performance from 7.0 to 7.1M IOPS, and ~7.5M IOPS with merging disabled. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 4 +-- block/blk-merge.c | 4 +-- block/blk-mq.c | 78 ++++++++++++++++++++++++------------------ include/linux/blkdev.h | 5 ++- 4 files changed, 50 insertions(+), 41 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index d0c2e11411d0..14d20909f61a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1550,7 +1550,7 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios) if (tsk->plug) return; - INIT_LIST_HEAD(&plug->mq_list); + plug->mq_list = NULL; plug->cached_rq = NULL; plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT); plug->rq_count = 0; @@ -1640,7 +1640,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { flush_plug_callbacks(plug, from_schedule); - if (!list_empty(&plug->mq_list)) + if (!rq_list_empty(plug->mq_list)) blk_mq_flush_plug_list(plug, from_schedule); if (unlikely(!from_schedule && plug->cached_rq)) blk_mq_free_plug_rqs(plug); diff --git a/block/blk-merge.c b/block/blk-merge.c index c273b58378ce..3e6fa449caff 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -1090,11 +1090,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, struct request *rq; plug = blk_mq_plug(q, bio); - if (!plug || list_empty(&plug->mq_list)) + if (!plug || rq_list_empty(plug->mq_list)) return false; /* check the previously added entry for a quick merge attempt */ - rq = list_last_entry(&plug->mq_list, struct request, queuelist); + rq = rq_list_peek(&plug->mq_list); if (rq->q == q) { /* * Only blk-mq multiple hardware queues case checks the rq in diff --git a/block/blk-mq.c b/block/blk-mq.c index 8f5c1662335b..7fa302730d4a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2151,34 +2151,46 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) { + struct blk_mq_hw_ctx *this_hctx; + struct blk_mq_ctx *this_ctx; + unsigned int depth; LIST_HEAD(list); - if (list_empty(&plug->mq_list)) + if (rq_list_empty(plug->mq_list)) return; - list_splice_init(&plug->mq_list, &list); plug->rq_count = 0; + this_hctx = 
NULL; + this_ctx = NULL; + depth = 0; do { - struct list_head rq_list; - struct request *rq, *head_rq = list_entry_rq(list.next); - struct list_head *pos = &head_rq->queuelist; /* skip first */ - struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx; - struct blk_mq_ctx *this_ctx = head_rq->mq_ctx; - unsigned int depth = 1; + struct request *rq; + + rq = rq_list_pop(&plug->mq_list); + + if (!this_hctx) { + this_hctx = rq->mq_hctx; + this_ctx = rq->mq_ctx; + } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) { + trace_block_unplug(this_hctx->queue, depth, + !from_schedule); + blk_mq_sched_insert_requests(this_hctx, this_ctx, + &list, from_schedule); + depth = 0; + this_hctx = rq->mq_hctx; + this_ctx = rq->mq_ctx; - list_for_each_continue(pos, &list) { - rq = list_entry_rq(pos); - BUG_ON(!rq->q); - if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) - break; - depth++; } - list_cut_before(&rq_list, &list, pos); - trace_block_unplug(head_rq->q, depth, !from_schedule); - blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list, + list_add(&rq->queuelist, &list); + depth++; + } while (!rq_list_empty(plug->mq_list)); + + if (!list_empty(&list)) { + trace_block_unplug(this_hctx->queue, depth, !from_schedule); + blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_schedule); - } while(!list_empty(&list)); + } } static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, @@ -2358,16 +2370,15 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) { - list_add_tail(&rq->queuelist, &plug->mq_list); - plug->rq_count++; - if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) { - struct request *tmp; + if (!plug->multiple_queues) { + struct request *nxt = rq_list_peek(&plug->mq_list); - tmp = list_first_entry(&plug->mq_list, struct request, - queuelist); - if (tmp->q != rq->q) + if (nxt && nxt->q != rq->q) plug->multiple_queues = true; } + rq->rq_next = NULL; + rq_list_add(&plug->mq_list, rq); + plug->rq_count++; } /* @@ -2479,13 +2490,15 @@ void blk_mq_submit_bio(struct bio *bio) unsigned int request_count = plug->rq_count; struct request *last = NULL; - if (!request_count) + if (!request_count) { trace_block_plug(q); - else - last = list_entry_rq(plug->mq_list.prev); + } else if (!blk_queue_nomerges(q)) { + last = rq_list_peek(&plug->mq_list); + if (blk_rq_bytes(last) < BLK_PLUG_FLUSH_SIZE) + last = NULL; + } - if (request_count >= blk_plug_max_rq_count(plug) || (last && - blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { + if (request_count >= blk_plug_max_rq_count(plug) || last) { blk_flush_plug_list(plug, false); trace_block_plug(q); } @@ -2505,10 +2518,7 @@ void blk_mq_submit_bio(struct bio *bio) * the plug list is empty, and same_queue_rq is invalid. */ if (same_queue_rq) { - next_rq = list_last_entry(&plug->mq_list, - struct request, - queuelist); - list_del_init(&next_rq->queuelist); + next_rq = rq_list_pop(&plug->mq_list); plug->rq_count--; } blk_add_rq_to_plug(plug, rq); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index fd9771a1da09..4027112b9851 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -728,7 +728,7 @@ extern void blk_set_queue_dying(struct request_queue *); * schedule() where blk_schedule_flush_plug() is called. 
*/ struct blk_plug { - struct list_head mq_list; /* blk-mq requests */ + struct request *mq_list; /* blk-mq requests */ /* if ios_left is > 1, we can batch tag/rq allocations */ struct request *cached_rq; @@ -777,8 +777,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) struct blk_plug *plug = tsk->plug; return plug && - (!list_empty(&plug->mq_list) || - !list_empty(&plug->cb_list)); + (plug->mq_list || !list_empty(&plug->cb_list)); } int blkdev_issue_flush(struct block_device *bdev); From dc5fc361d891e089dfd9c0a975dc78041036b906 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 19 Oct 2021 06:02:30 -0600 Subject: [PATCH 117/174] block: attempt direct issue of plug list If we have just one queue type in the plug list, then we can extend our direct issue to cover a full plug list as well. This allows sending a batch of requests for direct issue, which is more efficient than doing one-at-a-time kind of issue. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 1 + block/blk-mq.c | 60 ++++++++++++++++++++++++++++++++++++++++++ include/linux/blkdev.h | 1 + 3 files changed, 62 insertions(+) diff --git a/block/blk-core.c b/block/blk-core.c index 14d20909f61a..e6ad5b51d0c3 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1555,6 +1555,7 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios) plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT); plug->rq_count = 0; plug->multiple_queues = false; + plug->has_elevator = false; plug->nowait = false; INIT_LIST_HEAD(&plug->cb_list); diff --git a/block/blk-mq.c b/block/blk-mq.c index 7fa302730d4a..71ab7521dd3d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2149,6 +2149,58 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, spin_unlock(&ctx->lock); } +static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued, + bool from_schedule) +{ + if (hctx->queue->mq_ops->commit_rqs) { + trace_block_unplug(hctx->queue, *queued, !from_schedule); + hctx->queue->mq_ops->commit_rqs(hctx); + } + *queued = 0; +} + +static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule) +{ + struct blk_mq_hw_ctx *hctx = NULL; + struct request *rq; + int queued = 0; + int errors = 0; + + while ((rq = rq_list_pop(&plug->mq_list))) { + bool last = rq_list_empty(plug->mq_list); + blk_status_t ret; + + if (hctx != rq->mq_hctx) { + if (hctx) + blk_mq_commit_rqs(hctx, &queued, from_schedule); + hctx = rq->mq_hctx; + } + + ret = blk_mq_request_issue_directly(rq, last); + switch (ret) { + case BLK_STS_OK: + queued++; + break; + case BLK_STS_RESOURCE: + case BLK_STS_DEV_RESOURCE: + blk_mq_request_bypass_insert(rq, false, last); + blk_mq_commit_rqs(hctx, &queued, from_schedule); + return; + default: + blk_mq_end_request(rq, ret); + errors++; + break; + } + } + + /* + * If we didn't flush the entire list, we could have told the driver + * there was more coming, but that turned out to be a lie. 
+ */ + if (errors) + blk_mq_commit_rqs(hctx, &queued, from_schedule); +} + void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct blk_mq_hw_ctx *this_hctx; @@ -2160,6 +2212,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) return; plug->rq_count = 0; + if (!plug->multiple_queues && !plug->has_elevator) { + blk_mq_plug_issue_direct(plug, from_schedule); + if (rq_list_empty(plug->mq_list)) + return; + } + this_hctx = NULL; this_ctx = NULL; depth = 0; @@ -2376,6 +2434,8 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) if (nxt && nxt->q != rq->q) plug->multiple_queues = true; } + if (!plug->has_elevator && (rq->rq_flags & RQF_ELV)) + plug->has_elevator = true; rq->rq_next = NULL; rq_list_add(&plug->mq_list, rq); plug->rq_count++; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4027112b9851..f13091d3d476 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -737,6 +737,7 @@ struct blk_plug { unsigned short rq_count; bool multiple_queues; + bool has_elevator; bool nowait; struct list_head cb_list; /* md requires an unplug callback */ From d92ca9d8348fb12c89eac5928bd651c3a485d7b9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 19 Oct 2021 14:25:53 +0200 Subject: [PATCH 118/174] blk-mq: don't handle non-flush requests in blk_insert_flush Return to the normal blk_mq_submit_bio flow if the bio did not end up actually being a flush because the device didn't support it. Note that this is basically impossible to hit without special instrumentation given that submit_bio_checks already clears these flags usually, so we'd need a tight race to actually hit this code path. With this the call to blk_mq_run_hw_queue for the flush requests can be removed given that the actual flush requests are always issued via the requeue workqueue which runs the queue unconditionally. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211019122553.2467817-1-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-flush.c | 12 ++++++------ block/blk-mq.c | 14 ++++++-------- block/blk.h | 2 +- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/block/blk-flush.c b/block/blk-flush.c index 4201728bf3a5..8e364bda5166 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) * @rq is being submitted. Analyze what needs to be done and put it on the * right queue. */ -void blk_insert_flush(struct request *rq) +bool blk_insert_flush(struct request *rq) { struct request_queue *q = rq->q; unsigned long fflags = q->queue_flags; /* may change, cache */ @@ -409,7 +409,7 @@ void blk_insert_flush(struct request *rq) */ if (!policy) { blk_mq_end_request(rq, 0); - return; + return true; } BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ @@ -420,10 +420,8 @@ void blk_insert_flush(struct request *rq) * for normal execution. */ if ((policy & REQ_FSEQ_DATA) && - !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { - blk_mq_request_bypass_insert(rq, false, false); - return; - } + !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) + return false; /* * @rq should go through flush machinery. 
Mark it part of flush @@ -439,6 +437,8 @@ void blk_insert_flush(struct request *rq) spin_lock_irq(&fq->mq_flush_lock); blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); spin_unlock_irq(&fq->mq_flush_lock); + + return true; } /** diff --git a/block/blk-mq.c b/block/blk-mq.c index 71ab7521dd3d..3481a8712234 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2532,14 +2532,12 @@ void blk_mq_submit_bio(struct bio *bio) return; } - if (unlikely(is_flush_fua)) { - struct blk_mq_hw_ctx *hctx = rq->mq_hctx; - /* Bypass scheduler for flush requests */ - blk_insert_flush(rq); - blk_mq_run_hw_queue(hctx, true); - } else if (plug && (q->nr_hw_queues == 1 || - blk_mq_is_shared_tags(rq->mq_hctx->flags) || - q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { + if (is_flush_fua && blk_insert_flush(rq)) + return; + + if (plug && (q->nr_hw_queues == 1 || + blk_mq_is_shared_tags(rq->mq_hctx->flags) || + q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { /* * Use plugging if we have a ->commit_rqs() hook as well, as * we know the driver uses bd->last in a smart fashion. diff --git a/block/blk.h b/block/blk.h index b9729c12fd62..6a039e6c7d07 100644 --- a/block/blk.h +++ b/block/blk.h @@ -236,7 +236,7 @@ void __blk_account_io_done(struct request *req, u64 now); */ #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED) -void blk_insert_flush(struct request *rq); +bool blk_insert_flush(struct request *rq); int elevator_switch_mq(struct request_queue *q, struct elevator_type *new_e); From a808a9d545cdffb964f27239d1fc0c6e2330b424 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Oct 2021 08:28:14 -0600 Subject: [PATCH 119/174] block: inline fast path of driver tag allocation If we don't use an IO scheduler or have shared tags, then we don't need to call into this external function at all. This saves ~2% for such a setup. 
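The pattern is the usual split of a hot helper: an inline wrapper handles the common case, and only the complicated case pays for a call into the out-of-line function. A user-space sketch of the shape, with invented names and the real bookkeeping reduced to comments:

#include <stdbool.h>
#include <stdio.h>

#define NO_TAG (-1)

struct rq_model {
	int tag;
	bool shared;	/* stands in for BLK_MQ_F_TAG_QUEUE_SHARED */
};

/* out-of-line slow path: allocation and shared-tag accounting live here */
static bool slow_get_tag(struct rq_model *rq)
{
	rq->tag = 7;	/* pretend a tag was allocated */
	return true;
}

static inline bool get_tag(struct rq_model *rq)
{
	if (rq->tag != NO_TAG && !rq->shared)
		return true;	/* fast path: no function call at all */
	return slow_get_tag(rq);
}

int main(void)
{
	struct rq_model rq = { .tag = 3, .shared = false };

	printf("fast path: %d\n", get_tag(&rq));
	rq.tag = NO_TAG;
	printf("slow path: %d, tag=%d\n", get_tag(&rq), rq.tag);
	return 0;
}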
Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 +++----- block/blk-mq.h | 15 ++++++++++++++- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 3481a8712234..bf5936d72de8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1324,7 +1324,7 @@ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, return data.rq; } -static bool __blk_mq_get_driver_tag(struct request *rq) +static bool __blk_mq_alloc_driver_tag(struct request *rq) { struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; @@ -1348,11 +1348,9 @@ static bool __blk_mq_get_driver_tag(struct request *rq) return true; } -bool blk_mq_get_driver_tag(struct request *rq) +bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct blk_mq_hw_ctx *hctx = rq->mq_hctx; - - if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq)) + if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq)) return false; if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && diff --git a/block/blk-mq.h b/block/blk-mq.h index ebf67f4d4f2e..d8ccb341e82e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -258,7 +258,20 @@ static inline void blk_mq_put_driver_tag(struct request *rq) __blk_mq_put_driver_tag(rq->mq_hctx, rq); } -bool blk_mq_get_driver_tag(struct request *rq); +bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq); + +static inline bool blk_mq_get_driver_tag(struct request *rq) +{ + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; + + if (rq->tag != BLK_MQ_NO_TAG && + !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { + hctx->tags->rqs[rq->tag] = rq; + return true; + } + + return __blk_mq_get_driver_tag(hctx, rq); +} static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap) { From 2fc428f6b7ca80794cb9928c90d4de524366659f Mon Sep 17 00:00:00 2001 From: Zheng Liang Date: Mon, 18 Oct 2021 10:42:25 +0800 Subject: [PATCH 120/174] block, bfq: fix UAF problem in bfqg_stats_init() In bfq_pd_alloc(), bfqg_stats_init() initializes bfqg. If blkg_rwstat_init() succeeds for bfqg_stats->bytes but then fails for bfqg_stats->ios, bfqg_stats_init() returns an error and bfqg is freed, but blkg_rwstat->cpu_cnt is not deleted from the list of percpu_counters. Traversing that list afterwards is then a use-after-free. Use blkg_rwstat_exit() to clean up the bfqg_stats bytes in this scenario.
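The fix converges on the kernel's standard goto-unwind idiom: every failure branch jumps to one label that releases whatever was acquired so far, so no partially initialized object can escape. A self-contained C sketch of the idiom, with invented names and calloc/free standing in for blkg_rwstat_init/blkg_rwstat_exit:

#include <stdio.h>
#include <stdlib.h>

struct stats {
	int *bytes;
	int *ios;
};

static void stats_exit(struct stats *s)
{
	free(s->bytes);	/* free(NULL) is a no-op, so early failures are fine */
	free(s->ios);
	s->bytes = s->ios = NULL;
}

static int stats_init(struct stats *s)
{
	s->bytes = calloc(1, sizeof(*s->bytes));
	if (!s->bytes)
		goto error;
	s->ios = calloc(1, sizeof(*s->ios));
	if (!s->ios)
		goto error;
	return 0;

error:
	stats_exit(s);	/* one cleanup path for every failure point */
	return -1;
}

int main(void)
{
	struct stats s = { 0 };

	if (!stats_init(&s)) {
		puts("init ok");
		stats_exit(&s);
	}
	return 0;
}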
Fixes: fd41e60331b ("bfq-iosched: stop using blkg->stat_bytes and ->stat_ios") Signed-off-by: Zheng Liang Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20211018024225.1493938-1-zhengliang6@huawei.com Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 882ec4bc51ad..24a5c5329bcd 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -463,7 +463,7 @@ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) { if (blkg_rwstat_init(&stats->bytes, gfp) || blkg_rwstat_init(&stats->ios, gfp)) - return -ENOMEM; + goto error; #ifdef CONFIG_BFQ_CGROUP_DEBUG if (blkg_rwstat_init(&stats->merged, gfp) || @@ -476,13 +476,15 @@ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) bfq_stat_init(&stats->dequeue, gfp) || bfq_stat_init(&stats->group_wait_time, gfp) || bfq_stat_init(&stats->idle_time, gfp) || - bfq_stat_init(&stats->empty_time, gfp)) { - bfqg_stats_exit(stats); - return -ENOMEM; - } + bfq_stat_init(&stats->empty_time, gfp)) + goto error; #endif return 0; + +error: + bfqg_stats_exit(stats); + return -ENOMEM; } static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd) From a277654bafb51fb8b4cf23550f15926bb02536f4 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 14 Oct 2021 16:17:05 +0800 Subject: [PATCH 121/174] nvme: add APIs for stopping/starting admin queue Add two APIs for stopping and starting the admin queue. Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211014081710.1871747-2-ming.lei@redhat.com Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 12 ++++++++++++ drivers/nvme/host/nvme.h | 2 ++ 2 files changed, 14 insertions(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 3109bdf137e4..5fde5f635ca0 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4579,6 +4579,18 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_start_queues); +void nvme_stop_admin_queue(struct nvme_ctrl *ctrl) +{ + blk_mq_quiesce_queue(ctrl->admin_q); +} +EXPORT_SYMBOL_GPL(nvme_stop_admin_queue); + +void nvme_start_admin_queue(struct nvme_ctrl *ctrl) +{ + blk_mq_unquiesce_queue(ctrl->admin_q); +} +EXPORT_SYMBOL_GPL(nvme_start_admin_queue); + void nvme_sync_io_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ef2467b93adb..e7af00860452 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -679,6 +679,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, void nvme_stop_queues(struct nvme_ctrl *ctrl); void nvme_start_queues(struct nvme_ctrl *ctrl); +void nvme_stop_admin_queue(struct nvme_ctrl *ctrl); +void nvme_start_admin_queue(struct nvme_ctrl *ctrl); void nvme_kill_queues(struct nvme_ctrl *ctrl); void nvme_sync_queues(struct nvme_ctrl *ctrl); void nvme_sync_io_queues(struct nvme_ctrl *ctrl); From 6ca1d9027e0d9ce5604a3e28de89456a76138034 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 14 Oct 2021 16:17:06 +0800 Subject: [PATCH 122/174] nvme: apply nvme API to quiesce/unquiesce admin queue Apply the two added APIs to quiesce/unquiesce the admin queue.
Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211014081710.1871747-3-ming.lei@redhat.com Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 2 +- drivers/nvme/host/fc.c | 8 ++++---- drivers/nvme/host/pci.c | 8 ++++---- drivers/nvme/host/rdma.c | 14 +++++++------- drivers/nvme/host/tcp.c | 16 ++++++++-------- drivers/nvme/target/loop.c | 4 ++-- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 5fde5f635ca0..de222cfb5055 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4500,7 +4500,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) /* Forcibly unquiesce queues to avoid blocking dispatch */ if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) - blk_mq_unquiesce_queue(ctrl->admin_q); + nvme_start_admin_queue(ctrl); list_for_each_entry(ns, &ctrl->namespaces, list) nvme_set_queue_dying(ns); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index aa14ad963d91..580a216da619 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2382,7 +2382,7 @@ nvme_fc_ctrl_free(struct kref *ref) list_del(&ctrl->ctrl_list); spin_unlock_irqrestore(&ctrl->rport->lock, flags); - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_start_admin_queue(&ctrl->ctrl); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_cleanup_queue(ctrl->ctrl.fabrics_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); @@ -2510,7 +2510,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) /* * clean up the admin queue. Same thing as above. */ - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_stop_admin_queue(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_fc_terminate_exchange, &ctrl->ctrl); @@ -3095,7 +3095,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << (ilog2(SZ_4K) - 9); - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_start_admin_queue(&ctrl->ctrl); ret = nvme_init_ctrl_finish(&ctrl->ctrl); if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) @@ -3249,7 +3249,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) nvme_fc_free_queue(&ctrl->queues[0]); /* re-enable the admin_q so anything new can fast fail */ - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_start_admin_queue(&ctrl->ctrl); /* resume the io queues so that things will fast fail */ nvme_start_queues(&ctrl->ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ed684874842f..fde36c137900 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1414,7 +1414,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) nvmeq->dev->online_queues--; if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) - blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); + nvme_stop_admin_queue(&nvmeq->dev->ctrl); if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); return 0; @@ -1673,7 +1673,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev) * user requests may be waiting on a stopped queue. Start the * queue to flush these to completion. 
*/ - blk_mq_unquiesce_queue(dev->ctrl.admin_q); + nvme_start_admin_queue(&dev->ctrl); blk_cleanup_queue(dev->ctrl.admin_q); blk_mq_free_tag_set(&dev->admin_tagset); } @@ -1707,7 +1707,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) return -ENODEV; } } else - blk_mq_unquiesce_queue(dev->ctrl.admin_q); + nvme_start_admin_queue(&dev->ctrl); return 0; } @@ -2642,7 +2642,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) if (shutdown) { nvme_start_queues(&dev->ctrl); if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) - blk_mq_unquiesce_queue(dev->ctrl.admin_q); + nvme_start_admin_queue(&dev->ctrl); } mutex_unlock(&dev->shutdown_lock); } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 1624da3702d4..7d9ac544db37 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -919,7 +919,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, else ctrl->ctrl.max_integrity_segments = 0; - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_start_admin_queue(&ctrl->ctrl); error = nvme_init_ctrl_finish(&ctrl->ctrl); if (error) @@ -928,7 +928,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, return 0; out_quiesce_queue: - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_stop_admin_queue(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); out_stop_queue: nvme_rdma_stop_queue(&ctrl->queues[0]); @@ -1026,12 +1026,12 @@ out_free_io_queues: static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_stop_admin_queue(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); nvme_cancel_admin_tagset(&ctrl->ctrl); if (remove) - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_start_admin_queue(&ctrl->ctrl); nvme_rdma_destroy_admin_queue(ctrl, remove); } @@ -1154,7 +1154,7 @@ destroy_io: nvme_rdma_destroy_io_queues(ctrl, new); } destroy_admin: - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_stop_admin_queue(&ctrl->ctrl); blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); nvme_cancel_admin_tagset(&ctrl->ctrl); @@ -1194,7 +1194,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) nvme_rdma_teardown_io_queues(ctrl, false); nvme_start_queues(&ctrl->ctrl); nvme_rdma_teardown_admin_queue(ctrl, false); - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_start_admin_queue(&ctrl->ctrl); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { /* state change failure is ok if we started ctrl delete */ @@ -2232,7 +2232,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) cancel_delayed_work_sync(&ctrl->reconnect_work); nvme_rdma_teardown_io_queues(ctrl, shutdown); - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_stop_admin_queue(&ctrl->ctrl); if (shutdown) nvme_shutdown_ctrl(&ctrl->ctrl); else diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 9ce3458ee1dd..56a574505985 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1915,7 +1915,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) if (error) goto out_stop_queue; - blk_mq_unquiesce_queue(ctrl->admin_q); + nvme_start_admin_queue(ctrl); error = nvme_init_ctrl_finish(ctrl); if (error) @@ -1924,7 +1924,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) return 0; out_quiesce_queue: - blk_mq_quiesce_queue(ctrl->admin_q); + nvme_stop_admin_queue(ctrl); 
blk_sync_queue(ctrl->admin_q); out_stop_queue: nvme_tcp_stop_queue(ctrl, 0); @@ -1946,12 +1946,12 @@ out_free_queue: static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, bool remove) { - blk_mq_quiesce_queue(ctrl->admin_q); + nvme_stop_admin_queue(ctrl); blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); nvme_cancel_admin_tagset(ctrl); if (remove) - blk_mq_unquiesce_queue(ctrl->admin_q); + nvme_start_admin_queue(ctrl); nvme_tcp_destroy_admin_queue(ctrl, remove); } @@ -1960,7 +1960,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, { if (ctrl->queue_count <= 1) return; - blk_mq_quiesce_queue(ctrl->admin_q); + nvme_stop_admin_queue(ctrl); nvme_start_freeze(ctrl); nvme_stop_queues(ctrl); nvme_sync_io_queues(ctrl); @@ -2055,7 +2055,7 @@ destroy_io: nvme_tcp_destroy_io_queues(ctrl, new); } destroy_admin: - blk_mq_quiesce_queue(ctrl->admin_q); + nvme_stop_admin_queue(ctrl); blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); nvme_cancel_admin_tagset(ctrl); @@ -2098,7 +2098,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) /* unquiesce to fail fast pending requests */ nvme_start_queues(ctrl); nvme_tcp_teardown_admin_queue(ctrl, false); - blk_mq_unquiesce_queue(ctrl->admin_q); + nvme_start_admin_queue(ctrl); if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { /* state change failure is ok if we started ctrl delete */ @@ -2116,7 +2116,7 @@ static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); nvme_tcp_teardown_io_queues(ctrl, shutdown); - blk_mq_quiesce_queue(ctrl->admin_q); + nvme_stop_admin_queue(ctrl); if (shutdown) nvme_shutdown_ctrl(ctrl); else diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 0285ccc7541f..440e1544033b 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -398,7 +398,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ctrl->ctrl.max_hw_sectors = (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9); - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_start_admin_queue(&ctrl->ctrl); error = nvme_init_ctrl_finish(&ctrl->ctrl); if (error) @@ -428,7 +428,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) nvme_loop_destroy_io_queues(ctrl); } - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_stop_admin_queue(&ctrl->ctrl); if (ctrl->ctrl.state == NVME_CTRL_LIVE) nvme_shutdown_ctrl(&ctrl->ctrl); From ebc9b95260151d966728cf0063b3b4e465f934d9 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 14 Oct 2021 16:17:07 +0800 Subject: [PATCH 123/174] nvme: prepare for pairing quiescing and unquiescing Add two helpers so that we can prepare for pairing quiescing and unquiescing which will be done in next patch. Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211014081710.1871747-4-ming.lei@redhat.com Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 52 ++++++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 21 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index de222cfb5055..570da6ae7ed8 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -119,25 +119,6 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, struct nvme_command *cmd); -/* - * Prepare a queue for teardown. 
- * - * This must forcibly unquiesce queues to avoid blocking dispatch, and only set - * the capacity to 0 after that to avoid blocking dispatchers that may be - * holding bd_butex. This will end buffered writers dirtying pages that can't - * be synced. - */ -static void nvme_set_queue_dying(struct nvme_ns *ns) -{ - if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) - return; - - blk_set_queue_dying(ns->queue); - blk_mq_unquiesce_queue(ns->queue); - - set_capacity_and_notify(ns->disk, 0); -} - void nvme_queue_scan(struct nvme_ctrl *ctrl) { /* @@ -4485,6 +4466,35 @@ out: } EXPORT_SYMBOL_GPL(nvme_init_ctrl); +static void nvme_start_ns_queue(struct nvme_ns *ns) +{ + blk_mq_unquiesce_queue(ns->queue); +} + +static void nvme_stop_ns_queue(struct nvme_ns *ns) +{ + blk_mq_quiesce_queue(ns->queue); +} + +/* + * Prepare a queue for teardown. + * + * This must forcibly unquiesce queues to avoid blocking dispatch, and only set + * the capacity to 0 after that to avoid blocking dispatchers that may be + * holding bd_butex. This will end buffered writers dirtying pages that can't + * be synced. + */ +static void nvme_set_queue_dying(struct nvme_ns *ns) +{ + if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) + return; + + blk_set_queue_dying(ns->queue); + nvme_start_ns_queue(ns); + + set_capacity_and_notify(ns->disk, 0); +} + /** * nvme_kill_queues(): Ends all namespace queues * @ctrl: the dead controller that needs to end @@ -4563,7 +4573,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl) down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) - blk_mq_quiesce_queue(ns->queue); + nvme_stop_ns_queue(ns); up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_stop_queues); @@ -4574,7 +4584,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) - blk_mq_unquiesce_queue(ns->queue); + nvme_start_ns_queue(ns); up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_start_queues); From 9e6a6b1212100148c109675e003369e3e219dbd9 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 14 Oct 2021 16:17:08 +0800 Subject: [PATCH 124/174] nvme: pairing quiesce/unquiesce The current blk_mq_quiesce_queue() and blk_mq_unquiesce_queue() always stop and start the queue unconditionally, and concurrent quiesce/unquiesce can come from different, unrelated code paths, so an unquiesce may arrive unexpectedly and start the queue too early. Prepare for supporting concurrent quiesce/unquiesce from multiple contexts, so that we can address the above issue. NVMe has a very complicated quiesce/unquiesce use pattern; add one atomic bit to make sure that blk-mq quiesce/unquiesce is always called in pairs.
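The pairing bit is easy to model in user-space C11: stop/start may be called any number of times from unrelated paths, but the underlying quiesce/unquiesce runs only on a real state transition. Here atomic_exchange() stands in for test_and_set_bit()/test_and_clear_bit(), and all names are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool stopped;

static void raw_quiesce(void)   { puts("quiesce");   }
static void raw_unquiesce(void) { puts("unquiesce"); }

static void stop_queue(void)
{
	/* like test_and_set_bit(): only the first stopper quiesces */
	if (!atomic_exchange(&stopped, true))
		raw_quiesce();
}

static void start_queue(void)
{
	/* like test_and_clear_bit(): only a start that saw "stopped" unquiesces */
	if (atomic_exchange(&stopped, false))
		raw_unquiesce();
}

int main(void)
{
	stop_queue();
	stop_queue();	/* no-op, already stopped */
	start_queue();
	start_queue();	/* no-op, already started */
	return 0;
}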
Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211014081710.1871747-5-ming.lei@redhat.com Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 12 ++++++++---- drivers/nvme/host/nvme.h | 2 ++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 570da6ae7ed8..eb284f45fc44 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4468,12 +4468,14 @@ EXPORT_SYMBOL_GPL(nvme_init_ctrl); static void nvme_start_ns_queue(struct nvme_ns *ns) { - blk_mq_unquiesce_queue(ns->queue); + if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags)) + blk_mq_unquiesce_queue(ns->queue); } static void nvme_stop_ns_queue(struct nvme_ns *ns) { - blk_mq_quiesce_queue(ns->queue); + if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags)) + blk_mq_quiesce_queue(ns->queue); } /* @@ -4591,13 +4593,15 @@ EXPORT_SYMBOL_GPL(nvme_start_queues); void nvme_stop_admin_queue(struct nvme_ctrl *ctrl) { - blk_mq_quiesce_queue(ctrl->admin_q); + if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) + blk_mq_quiesce_queue(ctrl->admin_q); } EXPORT_SYMBOL_GPL(nvme_stop_admin_queue); void nvme_start_admin_queue(struct nvme_ctrl *ctrl) { - blk_mq_unquiesce_queue(ctrl->admin_q); + if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) + blk_mq_unquiesce_queue(ctrl->admin_q); } EXPORT_SYMBOL_GPL(nvme_start_admin_queue); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index e7af00860452..3652439a7458 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -342,6 +342,7 @@ struct nvme_ctrl { int nr_reconnects; unsigned long flags; #define NVME_CTRL_FAILFAST_EXPIRED 0 +#define NVME_CTRL_ADMIN_Q_STOPPED 1 struct nvmf_ctrl_options *opts; struct page *discard_page; @@ -463,6 +464,7 @@ struct nvme_ns { #define NVME_NS_ANA_PENDING 2 #define NVME_NS_FORCE_RO 3 #define NVME_NS_READY 4 +#define NVME_NS_STOPPED 5 struct cdev cdev; struct device cdev_device; From 1d35d519d8bf224ccdb43f9a235b8bda2d6d453c Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 14 Oct 2021 16:17:09 +0800 Subject: [PATCH 125/174] nvme: loop: clear NVME_CTRL_ADMIN_Q_STOPPED after admin queue is reallocated The nvme-loop admin queue may be freed and reallocated, and we have to reset the NVME_CTRL_ADMIN_Q_STOPPED flag so that it matches the quiesce state of the admin queue. nvme-loop is the only driver that reallocates its request queue; no such usage is seen in the other nvme drivers. Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211014081710.1871747-6-ming.lei@redhat.com Signed-off-by: Jens Axboe --- drivers/nvme/target/loop.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 440e1544033b..eb1094254c82 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -384,6 +384,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) error = PTR_ERR(ctrl->ctrl.admin_q); goto out_cleanup_fabrics_q; } + /* reset stopped state for the fresh admin queue */ + clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags); error = nvmf_connect_admin_queue(&ctrl->ctrl); if (error) From e70feb8b3e6886c525c88943b5f1508d02f5a683 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 14 Oct 2021 16:17:10 +0800 Subject: [PATCH 126/174] blk-mq: support concurrent queue quiesce/unquiesce blk_mq_quiesce_queue() is used fairly widely now, but so far we don't support concurrent/nested quiesce.
The biggest issue is that an unquiesce can happen unexpectedly when quiesce/unquiesce are run concurrently from more than one context. This patch introduces q->quiesce_depth to deal with concurrent quiesce, and we only unquiesce the queue on the last/outermost unquiesce of all contexts; e.g. if two contexts each quiesce and then unquiesce the queue, it stays quiesced until the second unquiesce drops the depth back to zero. Several kernel panic issues have been reported[1][2][3] when running stress quiesce tests, and this patch has been verified against those reports. [1] https://lore.kernel.org/linux-block/9b21c797-e505-3821-4f5b-df7bf9380328@huawei.com/T/#m1fc52431fad7f33b1ffc3f12c4450e4238540787 [2] https://lore.kernel.org/linux-block/9b21c797-e505-3821-4f5b-df7bf9380328@huawei.com/T/#m10ad90afeb9c8cc318334190a7c24c8b5c5e0722 [3] https://listman.redhat.com/archives/dm-devel/2021-September/msg00189.html Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211014081710.1871747-7-ming.lei@redhat.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 22 +++++++++++++++++++--- include/linux/blkdev.h | 2 ++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index bf5936d72de8..31d9e612d236 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -241,7 +241,12 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); */ void blk_mq_quiesce_queue_nowait(struct request_queue *q) { - blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); + unsigned long flags; + + spin_lock_irqsave(&q->queue_lock, flags); + if (!q->quiesce_depth++) + blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); + spin_unlock_irqrestore(&q->queue_lock, flags); } EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); @@ -282,10 +287,21 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); */ void blk_mq_unquiesce_queue(struct request_queue *q) { - blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); + unsigned long flags; + bool run_queue = false; + + spin_lock_irqsave(&q->queue_lock, flags); + if (WARN_ON_ONCE(q->quiesce_depth <= 0)) { + ; + } else if (!--q->quiesce_depth) { + blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); + run_queue = true; + } + spin_unlock_irqrestore(&q->queue_lock, flags); /* dispatch requests which are inserted during quiescing */ - blk_mq_run_hw_queues(q, true); + if (run_queue) + blk_mq_run_hw_queues(q, true); } EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f13091d3d476..2b22fa36e568 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -315,6 +315,8 @@ struct request_queue { */ struct mutex mq_freeze_lock; + int quiesce_depth; + struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set bio_split; From cf6d6238cdd319eca404756dee05bf55a748b6a9 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 19 Oct 2021 22:24:10 +0100 Subject: [PATCH 127/174] block: turn macro helpers into inline functions Replace bio_set_dev() with an identical inline helper and move it further down to fix a dependency problem with bio_associate_blkg(). Do the same for bio_copy_dev().
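As a side note on the conversion itself, a minimal sketch of what the inline form rules out (the caller below is hypothetical, not part of this patch): the old macro expanded its bdev argument twice, once in the (bio)->bi_bdev != (bdev) comparison and once in the assignment, while the inline function evaluates it exactly once and type-checks it as a struct block_device *.

        struct block_device *pick_backing_bdev(void);   /* hypothetical */

        /* With the macro, pick_backing_bdev() would have been invoked
         * twice; with the inline helper it runs exactly once. */
        bio_set_dev(bio, pick_backing_bdev());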
Reviewed-by: Christoph Hellwig Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- include/linux/bio.h | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/include/linux/bio.h b/include/linux/bio.h index 9538f20ffaa5..b12453d7b8a8 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -430,22 +430,6 @@ void zero_fill_bio(struct bio *bio); extern const char *bio_devname(struct bio *bio, char *buffer); -#define bio_set_dev(bio, bdev) \ -do { \ - bio_clear_flag(bio, BIO_REMAPPED); \ - if ((bio)->bi_bdev != (bdev)) \ - bio_clear_flag(bio, BIO_THROTTLED); \ - (bio)->bi_bdev = (bdev); \ - bio_associate_blkg(bio); \ -} while (0) - -#define bio_copy_dev(dst, src) \ -do { \ - bio_clear_flag(dst, BIO_REMAPPED); \ - (dst)->bi_bdev = (src)->bi_bdev; \ - bio_clone_blkg_association(dst, src); \ -} while (0) - #define bio_dev(bio) \ disk_devt((bio)->bi_bdev->bd_disk) @@ -463,6 +447,22 @@ static inline void bio_clone_blkg_association(struct bio *dst, struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ +static inline void bio_set_dev(struct bio *bio, struct block_device *bdev) +{ + bio_clear_flag(bio, BIO_REMAPPED); + if (bio->bi_bdev != bdev) + bio_clear_flag(bio, BIO_THROTTLED); + bio->bi_bdev = bdev; + bio_associate_blkg(bio); +} + +static inline void bio_copy_dev(struct bio *dst, struct bio *src) +{ + bio_clear_flag(dst, BIO_REMAPPED); + dst->bi_bdev = src->bi_bdev; + bio_clone_blkg_association(dst, src); +} + /* * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. * From 859897c3fb9ad12fef429b026154e11350d0ceac Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 19 Oct 2021 22:24:11 +0100 Subject: [PATCH 128/174] block: convert leftovers to bdev_get_queue Convert bdev->bd_disk->queue to bdev_get_queue(), which is faster. Apparently, there are a few such spots in block that got lost during rebases. Reviewed-by: Chaitanya Kulkarni Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-merge.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index e6ad5b51d0c3..c1ba34777c6d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1080,7 +1080,7 @@ EXPORT_SYMBOL(submit_bio); */ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); blk_qc_t cookie = READ_ONCE(bio->bi_cookie); int ret; diff --git a/block/blk-merge.c b/block/blk-merge.c index 3e6fa449caff..df69f4bb7717 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -383,7 +383,7 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio, */ void blk_queue_split(struct bio **bio) { - struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue((*bio)->bi_bdev); unsigned int nr_segs; if (blk_may_split(q, *bio)) From 478eb72b815f33734723867ff236d96afa418d69 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 19 Oct 2021 22:24:12 +0100 Subject: [PATCH 129/174] block: optimise req_bio_endio() First, get rid of an extra branch and chain the error checks. Also reshuffle it with bio_advance(), so it sits closer to the final check; with that, the compiler loads rq->rq_flags only once and also doesn't reload bio->bi_iter.bi_size if bio_advance() didn't actually advance the iter.
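One detail worth spelling out, since the zone-append check now runs before bio_advance() (a comment sketch restating the diff below, not new code):

        /* Before bio_advance(bio, nbytes), bi_size still counts the bytes
         * not yet completed, so a zone-append completion is total iff
         * nbytes == bio->bi_iter.bi_size; any other value is a partial
         * completion and must fail with BLK_STS_IOERR, as the remaining
         * fragments may end up not being written sequentially. */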
Reviewed-by: Christoph Hellwig Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/blk-mq.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 31d9e612d236..8b05a8f9bb33 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -633,25 +633,23 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug) static void req_bio_endio(struct request *rq, struct bio *bio, unsigned int nbytes, blk_status_t error) { - if (error) + if (unlikely(error)) { bio->bi_status = error; - - if (unlikely(rq->rq_flags & RQF_QUIET)) - bio_set_flag(bio, BIO_QUIET); - - bio_advance(bio, nbytes); - - if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) { + } else if (req_op(rq) == REQ_OP_ZONE_APPEND) { /* * Partial zone append completions cannot be supported as the * BIO fragments may end up not being written sequentially. */ - if (bio->bi_iter.bi_size) + if (bio->bi_iter.bi_size != nbytes) bio->bi_status = BLK_STS_IOERR; else bio->bi_iter.bi_sector = rq->__sector; } + bio_advance(bio, nbytes); + + if (unlikely(rq->rq_flags & RQF_QUIET)) + bio_set_flag(bio, BIO_QUIET); /* don't actually finish bio if it's part of flush sequence */ if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) bio_endio(bio); From 1497a51a3287959a9eb74e0432203ba3e2dc7347 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 19 Oct 2021 22:24:13 +0100 Subject: [PATCH 130/174] block: don't bloat enter_queue with percpu_ref percpu_ref_put() is inlined for performance and bloats the binary. Since we don't care about the fail case of blk_try_enter_queue(), we can replace it with a call to blk_queue_exit(). Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c index c1ba34777c6d..88752e51d2b6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -404,7 +404,7 @@ static bool blk_try_enter_queue(struct request_queue *q, bool pm) return true; fail_put: - percpu_ref_put(&q->q_usage_counter); + blk_queue_exit(q); fail: rcu_read_unlock(); return false; From c809084ab033a8d4ee404e2ac3c5d3dc80cb65f7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 19 Oct 2021 22:24:14 +0100 Subject: [PATCH 131/174] block: inline a part of bio_release_pages() Inline the BIO_NO_PAGE_REF check of bio_release_pages() to avoid a function call.
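As a sketch of the effect at a call site (the completion path below is hypothetical, not code from this patch), the common case where the bio took no page references now compiles down to one inlined flag test and no call:

        static void my_dio_bio_done(struct bio *bio)   /* hypothetical */
        {
                /* inlined: only calls __bio_release_pages() when the
                 * BIO_NO_PAGE_REF flag is not set on the bio */
                bio_release_pages(bio, bio_data_dir(bio) == READ);
                bio_put(bio);
        }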
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/bio.c | 7 ++----- include/linux/bio.h | 8 +++++++- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/block/bio.c b/block/bio.c index 4f397ba47db5..46a87c72d2b4 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1033,21 +1033,18 @@ int bio_add_page(struct bio *bio, struct page *page, } EXPORT_SYMBOL(bio_add_page); -void bio_release_pages(struct bio *bio, bool mark_dirty) +void __bio_release_pages(struct bio *bio, bool mark_dirty) { struct bvec_iter_all iter_all; struct bio_vec *bvec; - if (bio_flagged(bio, BIO_NO_PAGE_REF)) - return; - bio_for_each_segment_all(bvec, bio, iter_all) { if (mark_dirty && !PageCompound(bvec->bv_page)) set_page_dirty_lock(bvec->bv_page); put_page(bvec->bv_page); } } -EXPORT_SYMBOL_GPL(bio_release_pages); +EXPORT_SYMBOL_GPL(__bio_release_pages); static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) { diff --git a/include/linux/bio.h b/include/linux/bio.h index b12453d7b8a8..c88700d1bdc3 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -417,7 +417,7 @@ int bio_add_zone_append_page(struct bio *bio, struct page *page, void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); -void bio_release_pages(struct bio *bio, bool mark_dirty); +void __bio_release_pages(struct bio *bio, bool mark_dirty); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); @@ -428,6 +428,12 @@ extern void bio_free_pages(struct bio *bio); void guard_bio_eod(struct bio *bio); void zero_fill_bio(struct bio *bio); +static inline void bio_release_pages(struct bio *bio, bool mark_dirty) +{ + if (!bio_flagged(bio, BIO_NO_PAGE_REF)) + __bio_release_pages(bio, mark_dirty); +} + extern const char *bio_devname(struct bio *bio, char *buffer); #define bio_dev(bio) \ From 037057a5a979c7eeb2ee5d12cf4c24b805192c75 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 20 Oct 2021 08:21:40 -0600 Subject: [PATCH 132/174] block: remove inaccurate requeue check This check is meant to catch cases where a requeue is attempted on a request that is still inserted. It's never really been useful to catch any misuse, and now it's actively wrong. Outside of that, this should not be a BUG_ON() to begin with. Remove the check as it's now causing active harm, as requeue off the plug path will trigger it even though the request state is just fine. Reported-by: Yi Zhang Link: https://lore.kernel.org/linux-block/CAHj4cs80zAUc2grnCZ015-2Rvd-=gXRfB_dFKy=RTm+wRo09HQ@mail.gmail.com/ Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 - 1 file changed, 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 8b05a8f9bb33..a71aeed7b987 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1053,7 +1053,6 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) /* this request will be re-inserted to io scheduler queue */ blk_mq_sched_requeue_request(rq); - BUG_ON(!list_empty(&rq->queuelist)); blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); } EXPORT_SYMBOL(blk_mq_requeue_request); From a214b949d8e365583dd67441f6f608f0b20f7f52 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 20 Oct 2021 16:41:16 +0200 Subject: [PATCH 133/174] blk-mq: only flush requests from the plug in blk_mq_submit_bio Replace the call to blk_flush_plug_list in blk_mq_submit_bio with a direct call to blk_mq_flush_plug_list. 
This means we do not flush plug callbacks from stackable devices, which doesn't really help with the accumulated requests anyway, and it also means that the cached requests aren't freed here, as they can still be used later on. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211020144119.142582-2-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index a71aeed7b987..101466ece4c4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2568,7 +2568,7 @@ void blk_mq_submit_bio(struct bio *bio) } if (request_count >= blk_plug_max_rq_count(plug) || last) { - blk_flush_plug_list(plug, false); + blk_mq_flush_plug_list(plug, false); trace_block_plug(q); } From dbb6f764a079d1dea883c6f2439d91db4f0fb2f2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 20 Oct 2021 16:41:17 +0200 Subject: [PATCH 134/174] blk-mq: move blk_mq_flush_plug_list to block/blk-mq.h This helper is internal to the block layer. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211020144119.142582-3-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.h | 1 + include/linux/blk-mq.h | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/block/blk-mq.h b/block/blk-mq.h index d8ccb341e82e..08fb5922e611 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -122,6 +122,7 @@ extern int blk_mq_sysfs_register(struct request_queue *q); extern void blk_mq_sysfs_unregister(struct request_queue *q); extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); void blk_mq_free_plug_rqs(struct blk_plug *plug); +void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); void blk_mq_release(struct request_queue *q); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 6cf35de151a9..e13780236550 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -656,8 +656,6 @@ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, unsigned int set_flags); void blk_mq_free_tag_set(struct blk_mq_tag_set *set); -void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); - void blk_mq_free_request(struct request *rq); bool blk_mq_queue_inflight(struct request_queue *q); From b600455d84307696b3cb7debdaf3081080748409 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 20 Oct 2021 16:41:18 +0200 Subject: [PATCH 135/174] block: optimise blk_flush_plug_list Don't call flush_plug_callbacks if there are no plug callbacks.
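For orientation, a sketch of the plug lifecycle that this and the surrounding patches touch (hypothetical submitter; the details are in the diffs below):

        struct blk_plug plug;

        blk_start_plug(&plug);
        /* submit_bio() accumulates requests on plug.mq_list; stacked
         * drivers may hang callbacks off plug.cb_list via
         * blk_check_plugged() */
        blk_finish_plug(&plug); /* flushes both lists, from_schedule=false */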
Signed-off-by: Pavel Begunkov [hch: split from a larger patch] Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211020144119.142582-4-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 88752e51d2b6..db8b2fe0ceaf 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1639,8 +1639,8 @@ EXPORT_SYMBOL(blk_check_plugged); void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { - flush_plug_callbacks(plug, from_schedule); - + if (!list_empty(&plug->cb_list)) + flush_plug_callbacks(plug, from_schedule); if (!rq_list_empty(plug->mq_list)) blk_mq_flush_plug_list(plug, from_schedule); if (unlikely(!from_schedule && plug->cached_rq)) From 008f75a20e7072d0840ec323c39b42206f3fa8a0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 20 Oct 2021 16:41:19 +0200 Subject: [PATCH 136/174] block: cleanup the flush plug helpers Consolidate the various helpers into a single blk_flush_plug helper that takes a blk_plug and the from_schedule bool, and switch all callsites to call it directly. Checks that the plug is non-NULL must be performed by the caller, something that most already do anyway. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211020144119.142582-5-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 13 ++++++------- fs/fs-writeback.c | 5 +++-- include/linux/blkdev.h | 29 ++++------------------------- kernel/sched/core.c | 5 +++-- 4 files changed, 16 insertions(+), 36 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index db8b2fe0ceaf..dfa199312c2f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1089,7 +1089,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) return 0; if (current->plug) - blk_flush_plug_list(current->plug, false); + blk_flush_plug(current->plug, false); if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) return 0; @@ -1637,7 +1637,7 @@ struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, } EXPORT_SYMBOL(blk_check_plugged); -void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) +void blk_flush_plug(struct blk_plug *plug, bool from_schedule) { if (!list_empty(&plug->cb_list)) flush_plug_callbacks(plug, from_schedule); @@ -1659,11 +1659,10 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) */ void blk_finish_plug(struct blk_plug *plug) { - if (plug != current->plug) - return; - blk_flush_plug_list(plug, false); - - current->plug = NULL; + if (plug == current->plug) { + blk_flush_plug(plug, false); + current->plug = NULL; + } } EXPORT_SYMBOL(blk_finish_plug); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 81ec192ce067..4124a89a1a5d 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1893,7 +1893,8 @@ static long writeback_sb_inodes(struct super_block *sb, * unplug, so get our IOs out the door before we * give up the CPU. */ - blk_flush_plug(current); + if (current->plug) + blk_flush_plug(current->plug, false); cond_resched(); } @@ -2291,7 +2292,7 @@ void wakeup_flusher_threads(enum wb_reason reason) * If we are expecting writeback progress we must submit plugged IO.
*/ if (blk_needs_flush_plug(current)) - blk_schedule_flush_plug(current); + blk_flush_plug(current->plug, true); rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2b22fa36e568..c7b1e9355123 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -725,9 +725,8 @@ extern void blk_set_queue_dying(struct request_queue *); * as the lock contention for request_queue lock is reduced. * * It is ok not to disable preemption when adding the request to the plug list - * or when attempting a merge, because blk_schedule_flush_list() will only flush - * the plug list when the task sleeps by itself. For details, please see - * schedule() where blk_schedule_flush_plug() is called. + * or when attempting a merge. For details, please see schedule() where + * blk_flush_plug() is called. */ struct blk_plug { struct request *mq_list; /* blk-mq requests */ @@ -757,23 +756,8 @@ extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, extern void blk_start_plug(struct blk_plug *); extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short); extern void blk_finish_plug(struct blk_plug *); -extern void blk_flush_plug_list(struct blk_plug *, bool); -static inline void blk_flush_plug(struct task_struct *tsk) -{ - struct blk_plug *plug = tsk->plug; - - if (plug) - blk_flush_plug_list(plug, false); -} - -static inline void blk_schedule_flush_plug(struct task_struct *tsk) -{ - struct blk_plug *plug = tsk->plug; - - if (plug) - blk_flush_plug_list(plug, true); -} +void blk_flush_plug(struct blk_plug *plug, bool from_schedule); static inline bool blk_needs_flush_plug(struct task_struct *tsk) { @@ -802,15 +786,10 @@ static inline void blk_finish_plug(struct blk_plug *plug) { } -static inline void blk_flush_plug(struct task_struct *task) +static inline void blk_flush_plug(struct blk_plug *plug, bool async) { } -static inline void blk_schedule_flush_plug(struct task_struct *task) -{ -} - - static inline bool blk_needs_flush_plug(struct task_struct *tsk) { return false; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 92ef7b68198c..34f37502c27e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6343,7 +6343,7 @@ static inline void sched_submit_work(struct task_struct *tsk) * make sure to submit it to avoid deadlocks. */ if (blk_needs_flush_plug(tsk)) - blk_schedule_flush_plug(tsk); + blk_flush_plug(tsk->plug, true); } static void sched_update_worker(struct task_struct *tsk) @@ -8354,7 +8354,8 @@ int io_schedule_prepare(void) int old_iowait = current->in_iowait; current->in_iowait = 1; - blk_schedule_flush_plug(current); + if (current->plug) + blk_flush_plug(current->plug, true); return old_iowait; } From 0994c64eb4159ba019e7fedc7ba0dd6a69235b40 Mon Sep 17 00:00:00 2001 From: John Garry Date: Mon, 18 Oct 2021 17:41:23 +0800 Subject: [PATCH 137/174] blk-mq: Fix blk_mq_tagset_busy_iter() for shared tags Since it is now possible for a tagset to share a single set of tags, the iter function should not iterate over the tags once per hw queue in that case. Rather, it should just iterate once.
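To make the bug concrete (numbers are illustrative only): with a shared-tags tagset and nr_hw_queues = 4, all four tagset->tags[i] entries refer to the same tags, so the old loop invoked fn() four times for every busy request, and counters accumulated in fn() came out 4x too high.

        /* old behaviour, shared tags, nr_hw_queues = 4:
         *   tags[0] == tags[1] == tags[2] == tags[3]
         *   => fn() ran 4 times per busy request
         * new behaviour: nr_tags = 1, fn() runs once per busy request */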
Fixes: e155b0c238b2 ("blk-mq: Use shared tags for shared sbitmap support") Reported-by: Kashyap Desai Signed-off-by: John Garry Reviewed-by: Ming Lei Tested-by: Kashyap Desai Link: https://lore.kernel.org/r/1634550083-202815-1-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index b94c3e8ef392..995336abee33 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -399,9 +399,12 @@ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, busy_tag_iter_fn *fn, void *priv) { - int i; + unsigned int flags = tagset->flags; + int i, nr_tags; - for (i = 0; i < tagset->nr_hw_queues; i++) { + nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues; + + for (i = 0; i < nr_tags; i++) { if (tagset->tags && tagset->tags[i]) __blk_mq_all_tag_iter(tagset->tags[i], fn, priv, BT_TAG_ITER_STARTED); From 057178cf518e699695a4b614a7a08c350b1fdcfd Mon Sep 17 00:00:00 2001 From: Jackie Liu Date: Thu, 21 Oct 2021 15:13:43 +0800 Subject: [PATCH 138/174] fs: bdev: fix conflicting comment from lookup_bdev We switched to using the dev_t directly to get at the block device, which changed what lookup_bdev() returns to its caller; fix the now-conflicting comment. Fixes: 4e7b5671c6a8 ("block: remove i_bdev") Cc: Jens Axboe Cc: Christoph Hellwig Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Jackie Liu Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211021071344.1600362-1-liu.yun@linux.dev Signed-off-by: Jens Axboe --- block/bdev.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/block/bdev.c b/block/bdev.c index cff0bb3a4578..7e6156203a71 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -964,9 +964,11 @@ EXPORT_SYMBOL(blkdev_put); * @pathname: special file representing the block device * @dev: return value of the block device's dev_t * - * Get a reference to the blockdevice at @pathname in the current - * namespace if possible and return it. Return ERR_PTR(error) - * otherwise. + * Lookup the block device's dev_t at @pathname in the current + * namespace if possible and return it by @dev. + * + * RETURNS: + * 0 if succeeded, errno otherwise. */ int lookup_bdev(const char *pathname, dev_t *dev) { From 6450fe1f668f410fe2ab69c79a52a0929a4a8296 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 20 Oct 2021 20:00:48 +0100 Subject: [PATCH 139/174] block: optimise boundary blkdev_read_iter's checks Combine the pos and len checks and mark them unlikely. Also, don't reexpand the iter if it wasn't truncated.
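A worked example of the combined boundary check (illustrative values, not from the patch):

        /* bdev size = 4096, pos = 4000, iov_iter_count(to) = 512:
         *   pos + 512 = 4512 > 4096   -> take the unlikely branch
         *   pos < size                -> not past EOF, don't return 0
         *   size -= pos               -> 96 readable bytes remain
         *   shorted = 512 - 96 = 416  -> iov_iter_truncate(to, 96)
         * The iter is reexpanded by 416 after the read only because
         * shorted != 0; a fully in-bounds read skips all of the above. */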
Reviewed-by: Christoph Hellwig Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/fff34e613aeaae1ad12977dc4592cb1a1f5d3190.1634755800.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/block/fops.c b/block/fops.c index 21d25ee0e4bf..8f733c919ed1 100644 --- a/block/fops.c +++ b/block/fops.c @@ -503,17 +503,20 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) size_t shorted = 0; ssize_t ret; - if (pos >= size) - return 0; - - size -= pos; - if (iov_iter_count(to) > size) { - shorted = iov_iter_count(to) - size; - iov_iter_truncate(to, size); + if (unlikely(pos + iov_iter_count(to) > size)) { + if (pos >= size) + return 0; + size -= pos; + if (iov_iter_count(to) > size) { + shorted = iov_iter_count(to) - size; + iov_iter_truncate(to, size); + } } ret = generic_file_read_iter(iocb, to); - iov_iter_reexpand(to, iov_iter_count(to) + shorted); + + if (unlikely(shorted)) + iov_iter_reexpand(to, iov_iter_count(to) + shorted); return ret; } From 179ae84f7ef5225e03cd29cb9a75f6e90d2d7388 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 20 Oct 2021 20:00:49 +0100 Subject: [PATCH 140/174] block: clean up blk_mq_submit_bio() merging Combine blk_mq_sched_bio_merge() and blk_attempt_plug_merge() under a common if, so we don't check it twice. Signed-off-by: Pavel Begunkov Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/daedc90d4029a5d1d73344771632b1faca3aaf81.1634755800.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 2 +- block/blk-mq-sched.h | 12 +----------- block/blk-mq.c | 15 +++++++-------- 3 files changed, 9 insertions(+), 20 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index e85b7556b096..5b259fdea794 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -361,7 +361,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) } } -bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, +bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { struct elevator_queue *e = q->elevator; diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 98836106b25f..25d1034952b6 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -12,7 +12,7 @@ void blk_mq_sched_assign_ioc(struct request *rq); bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs, struct request **merged_request); -bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, +bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs); bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, struct list_head *free); @@ -42,16 +42,6 @@ static inline bool bio_mergeable(struct bio *bio) return !(bio->bi_opf & REQ_NOMERGE_FLAGS); } -static inline bool -blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, - unsigned int nr_segs) -{ - if (blk_queue_nomerges(q) || !bio_mergeable(bio)) - return false; - - return __blk_mq_sched_bio_merge(q, bio, nr_segs); -} - static inline bool blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, struct bio *bio) diff --git a/block/blk-mq.c b/block/blk-mq.c index 101466ece4c4..d04ee72ba125 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2481,7 +2481,6 @@ void blk_mq_submit_bio(struct bio *bio) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); const int is_sync = 
op_is_sync(bio->bi_opf); - const int is_flush_fua = op_is_flush(bio->bi_opf); struct request *rq; struct blk_plug *plug; bool same_queue_rq = false; @@ -2495,12 +2494,12 @@ void blk_mq_submit_bio(struct bio *bio) if (!bio_integrity_prep(bio)) goto queue_exit; - if (!is_flush_fua && !blk_queue_nomerges(q) && - blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq)) - goto queue_exit; - - if (blk_mq_sched_bio_merge(q, bio, nr_segs)) - goto queue_exit; + if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { + if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq)) + goto queue_exit; + if (blk_mq_sched_bio_merge(q, bio, nr_segs)) + goto queue_exit; + } rq_qos_throttle(q, bio); @@ -2543,7 +2542,7 @@ void blk_mq_submit_bio(struct bio *bio) return; } - if (is_flush_fua && blk_insert_flush(rq)) + if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq)) return; if (plug && (q->nr_hw_queues == 1 || From 6549a874fb65e7ad4885d066ec314191cc137b52 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 20 Oct 2021 20:00:50 +0100 Subject: [PATCH 141/174] block: convert fops.c magic constants to SECTOR_SHIFT Don't shift by the magic number 9; replace it with the more descriptive SECTOR_SHIFT. Signed-off-by: Pavel Begunkov Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/068782b9f7e97569fb59a99529b23bb17ea4c5e2.1634755800.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/block/fops.c b/block/fops.c index 8f733c919ed1..396537598e3e 100644 --- a/block/fops.c +++ b/block/fops.c @@ -76,7 +76,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, bio_init(&bio, vecs, nr_pages); bio_set_dev(&bio, bdev); - bio.bi_iter.bi_sector = pos >> 9; + bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT; bio.bi_write_hint = iocb->ki_hint; bio.bi_private = current; bio.bi_end_io = blkdev_bio_end_io_simple; @@ -225,7 +225,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, for (;;) { bio_set_dev(bio, bdev); - bio->bi_iter.bi_sector = pos >> 9; + bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; bio->bi_write_hint = iocb->ki_hint; bio->bi_private = dio; bio->bi_end_io = blkdev_bio_end_io; @@ -565,16 +565,18 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, switch (mode) { case FALLOC_FL_ZERO_RANGE: case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE: - error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, - GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); + error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT, + len >> SECTOR_SHIFT, GFP_KERNEL, + BLKDEV_ZERO_NOUNMAP); break; case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE: - error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, - GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK); + error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT, + len >> SECTOR_SHIFT, GFP_KERNEL, + BLKDEV_ZERO_NOFALLBACK); break; case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE: - error = blkdev_issue_discard(bdev, start >> 9, len >> 9, - GFP_KERNEL, 0); + error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT, + len >> SECTOR_SHIFT, GFP_KERNEL, 0); break; default: error = -EOPNOTSUPP; From 3b13c168186c115501ee7d194460ba2f8c825155 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 21 Oct 2021 14:30:51 +0100 Subject: [PATCH 142/174] percpu_ref: percpu_ref_tryget_live() version holding RCU Add percpu_ref_tryget_live_rcu(), which is a version of percpu_ref_tryget_live(), but the user is responsible for enclosing it in
an RCU read lock section. Signed-off-by: Pavel Begunkov Acked-by: Dennis Zhou Link: https://lore.kernel.org/r/3066500d7a6eb3e03f10adf98b87fdb3b1c49db8.1634822969.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- include/linux/percpu-refcount.h | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index ae16a9856305..b31d3f3312ce 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -266,6 +266,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref) return percpu_ref_tryget_many(ref, 1); } +/** + * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the + * caller is responsible for taking RCU. + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref) +{ + unsigned long __percpu *percpu_count; + bool ret = false; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (likely(__ref_is_percpu(ref, &percpu_count))) { + this_cpu_inc(*percpu_count); + ret = true; + } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) { + ret = atomic_long_inc_not_zero(&ref->data->count); + } + return ret; +} + /** * percpu_ref_tryget_live - try to increment a live percpu refcount * @ref: percpu_ref to try-get @@ -283,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref) */ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) { - unsigned long __percpu *percpu_count; bool ret = false; rcu_read_lock(); - - if (__ref_is_percpu(ref, &percpu_count)) { - this_cpu_inc(*percpu_count); - ret = true; - } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) { - ret = atomic_long_inc_not_zero(&ref->data->count); - } - + ret = percpu_ref_tryget_live_rcu(ref); rcu_read_unlock(); - return ret; } From e94f68527a35271131cdf9d3fb4eb3c2513dc3d0 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 21 Oct 2021 14:30:52 +0100 Subject: [PATCH 143/174] block: kill extra rcu lock/unlock in queue enter blk_try_enter_queue() already takes rcu_read_lock/unlock, so we can avoid the second pair in percpu_ref_tryget_live() by using the newly added percpu_ref_tryget_live_rcu(). As rcu_read_lock/unlock imply barrier()s, it's pretty noticeable, especially for !CONFIG_PREEMPT_RCU (the default for some distributions), where __rcu_read_lock/unlock() are not inlined.
Before: 3.20% io_uring [kernel.vmlinux] [k] __rcu_read_unlock 3.05% io_uring [kernel.vmlinux] [k] __rcu_read_lock After: 2.52% io_uring [kernel.vmlinux] [k] __rcu_read_unlock 2.28% io_uring [kernel.vmlinux] [k] __rcu_read_lock Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6b11c67ea495ed9d44f067622d852de4a510ce65.1634822969.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c index dfa199312c2f..fd389a16013c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -389,7 +389,7 @@ EXPORT_SYMBOL(blk_cleanup_queue); static bool blk_try_enter_queue(struct request_queue *q, bool pm) { rcu_read_lock(); - if (!percpu_ref_tryget_live(&q->q_usage_counter)) + if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter)) goto fail; /* From f059a1d2e23a165bf86e33673c6a7535a08c6341 Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Wed, 22 Sep 2021 20:37:08 +0800 Subject: [PATCH 144/174] block: Add invalidate_disk() helper to invalidate the gendisk To hide internal implementation and simplify some driver code, this adds a helper to invalidate the gendisk. It will clean the gendisk's associated buffer/page caches and reset its internal states. Signed-off-by: Xie Yongji Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210922123711.187-2-xieyongji@bytedance.com Signed-off-by: Jens Axboe --- block/genhd.c | 20 ++++++++++++++++++++ include/linux/genhd.h | 2 ++ 2 files changed, 22 insertions(+) diff --git a/block/genhd.c b/block/genhd.c index 80943c123c3e..64f83c4aee99 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -624,6 +624,26 @@ void del_gendisk(struct gendisk *disk) } EXPORT_SYMBOL(del_gendisk); +/** + * invalidate_disk - invalidate the disk + * @disk: the struct gendisk to invalidate + * + * A helper to invalidate the disk. It will clean the disk's associated + * buffer/page caches and reset its internal states so that the disk + * can be reused by the drivers. + * + * Context: can sleep + */ +void invalidate_disk(struct gendisk *disk) +{ + struct block_device *bdev = disk->part0; + + invalidate_bdev(bdev); + bdev->bd_inode->i_mapping->wb_err = 0; + set_capacity(disk, 0); +} +EXPORT_SYMBOL(invalidate_disk); + /* sysfs access to bad-blocks list. */ static ssize_t disk_badblocks_show(struct device *dev, struct device_attribute *attr, diff --git a/include/linux/genhd.h b/include/linux/genhd.h index c70bc5fce4db..13f313ab99e7 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -213,6 +213,8 @@ static inline int add_disk(struct gendisk *disk) } extern void del_gendisk(struct gendisk *gp); +void invalidate_disk(struct gendisk *disk); + void set_disk_ro(struct gendisk *disk, bool read_only); static inline int get_disk_ro(struct gendisk *disk) From e515be8f3b3e63be4c5e91dc6620483ed0990a0c Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Wed, 22 Sep 2021 20:37:09 +0800 Subject: [PATCH 145/174] loop: Use invalidate_disk() helper to invalidate gendisk Use the invalidate_disk() helper to simplify the code for gendisk invalidation.
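For reference, a sketch of the open-coded sequence that this and the following patches replace, next to the new call (condensed from the diffs; disk->part0 as used in the helper above):

        /* before: */
        invalidate_bdev(disk->part0);
        disk->part0->bd_inode->i_mapping->wb_err = 0;
        set_capacity(disk, 0);
        /* after: */
        invalidate_disk(disk);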
Signed-off-by: Xie Yongji Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210922123711.187-3-xieyongji@bytedance.com Signed-off-by: Jens Axboe --- drivers/block/loop.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 7bf4686af774..eab6906326cc 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1395,11 +1395,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release) blk_queue_logical_block_size(lo->lo_queue, 512); blk_queue_physical_block_size(lo->lo_queue, 512); blk_queue_io_min(lo->lo_queue, 512); - if (bdev) { - invalidate_bdev(bdev); - bdev->bd_inode->i_mapping->wb_err = 0; - } - set_capacity(lo->lo_disk, 0); + invalidate_disk(lo->lo_disk); loop_sysfs_exit(lo); if (bdev) { /* let user-space know about this change */ From 19f553db2ac03cb8407ec8efb8e140951afdfb87 Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Wed, 22 Sep 2021 20:37:10 +0800 Subject: [PATCH 146/174] loop: Remove the unnecessary bdev checks and unused bdev variable The lo->lo_device can't be null if the lo->lo_backing_file is set. So let's remove the unnecessary bdev checks and the entire bdev variable in __loop_clr_fd(), since the lo->lo_backing_file is already checked before. Signed-off-by: Xie Yongji Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210922123711.187-4-xieyongji@bytedance.com Signed-off-by: Jens Axboe --- drivers/block/loop.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index eab6906326cc..980b538c008a 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1329,7 +1329,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release) { struct file *filp = NULL; gfp_t gfp = lo->old_gfp_mask; - struct block_device *bdev = lo->lo_device; int err = 0; bool partscan = false; int lo_number; @@ -1397,16 +1396,14 @@ static int __loop_clr_fd(struct loop_device *lo, bool release) blk_queue_io_min(lo->lo_queue, 512); invalidate_disk(lo->lo_disk); loop_sysfs_exit(lo); - if (bdev) { - /* let user-space know about this change */ - kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); - } + /* let user-space know about this change */ + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); mapping_set_gfp_mask(filp->f_mapping, gfp); /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); blk_mq_unfreeze_queue(lo->lo_queue); - partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev; + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; lo_number = lo->lo_number; disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); out_unlock: From 435c2acb307f19acc791b4295e29cc53a82bd24d Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Wed, 22 Sep 2021 20:37:11 +0800 Subject: [PATCH 147/174] nbd: Use invalidate_disk() helper on disconnect When an nbd device encounters a writeback error, that error will get propagated to the bd_inode's wb_err field. Then if this nbd device's backend is disconnected and another is attached, we will get back the previous writeback error on fsync, which is unexpected. To fix it, let's use the invalidate_disk() helper to invalidate the disk on disconnect instead of just setting the disk's capacity to zero.
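To illustrate the symptom being fixed (a hypothetical sequence restating the description above):

        /* 1. write to /dev/nbd0 fails in writeback  -> wb_err recorded
         * 2. disconnect backend, attach a new one   -> wb_err survives
         * 3. fsync() against the new backend        -> old error returned
         * invalidate_disk() clears bd_inode->i_mapping->wb_err on
         * disconnect, so step 3 now reports success. */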
Signed-off-by: Xie Yongji Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210922123711.187-5-xieyongji@bytedance.com Signed-off-by: Jens Axboe --- drivers/block/nbd.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 1183f7872b71..8922b8864487 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -310,14 +310,6 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, nsock->sent = 0; } -static void nbd_size_clear(struct nbd_device *nbd) -{ - if (nbd->config->bytesize) { - set_capacity(nbd->disk, 0); - kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); - } -} - static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize) { @@ -1237,7 +1229,9 @@ static void nbd_config_put(struct nbd_device *nbd) &nbd->config_lock)) { struct nbd_config *config = nbd->config; nbd_dev_dbg_close(nbd); - nbd_size_clear(nbd); + invalidate_disk(nbd->disk); + if (nbd->config->bytesize) + kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); if (test_and_clear_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags)) device_remove_file(disk_to_dev(nbd->disk), &pid_attr); From eebcafaebb17cb8fda671709fab5dd836bdc3a08 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 18 Oct 2021 11:04:50 -0700 Subject: [PATCH 148/174] blk-crypto-fallback: properly prefix function and struct names For clarity, avoid using just the "blk_crypto_" prefix for functions and structs that are specific to blk-crypto-fallback. Instead, use "blk_crypto_fallback_". Some places already did this, but others didn't. This is also a prerequisite for using "struct blk_crypto_keyslot" to mean a generic blk-crypto keyslot (which is what it sounds like). Rename the fallback one to "struct blk_crypto_fallback_keyslot". No change in behavior. Reviewed-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Reviewed-by: Mike Snitzer Reviewed-by: Martin K. 
Petersen Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20211018180453.40441-2-ebiggers@kernel.org Signed-off-by: Jens Axboe --- block/blk-crypto-fallback.c | 59 +++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index ec4c7823541c..1bcc1a151424 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -73,7 +73,7 @@ static mempool_t *bio_fallback_crypt_ctx_pool; static DEFINE_MUTEX(tfms_init_lock); static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX]; -static struct blk_crypto_keyslot { +static struct blk_crypto_fallback_keyslot { enum blk_crypto_mode_num crypto_mode; struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX]; } *blk_crypto_keyslots; @@ -89,9 +89,9 @@ static struct bio_set crypto_bio_split; */ static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE]; -static void blk_crypto_evict_keyslot(unsigned int slot) +static void blk_crypto_fallback_evict_keyslot(unsigned int slot) { - struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot]; + struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot]; enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode; int err; @@ -104,34 +104,34 @@ static void blk_crypto_evict_keyslot(unsigned int slot) slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID; } -static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) +static int blk_crypto_fallback_keyslot_program(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) { - struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot]; + struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot]; const enum blk_crypto_mode_num crypto_mode = key->crypto_cfg.crypto_mode; int err; if (crypto_mode != slotp->crypto_mode && slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID) - blk_crypto_evict_keyslot(slot); + blk_crypto_fallback_evict_keyslot(slot); slotp->crypto_mode = crypto_mode; err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw, key->size); if (err) { - blk_crypto_evict_keyslot(slot); + blk_crypto_fallback_evict_keyslot(slot); return err; } return 0; } -static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) +static int blk_crypto_fallback_keyslot_evict(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) { - blk_crypto_evict_keyslot(slot); + blk_crypto_fallback_evict_keyslot(slot); return 0; } @@ -141,8 +141,8 @@ static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm, * hardware. 
*/ static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = { - .keyslot_program = blk_crypto_keyslot_program, - .keyslot_evict = blk_crypto_keyslot_evict, + .keyslot_program = blk_crypto_fallback_keyslot_program, + .keyslot_evict = blk_crypto_fallback_keyslot_evict, }; static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio) @@ -160,7 +160,7 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio) bio_endio(src_bio); } -static struct bio *blk_crypto_clone_bio(struct bio *bio_src) +static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src) { struct bvec_iter iter; struct bio_vec bv; @@ -187,12 +187,13 @@ static struct bio *blk_crypto_clone_bio(struct bio *bio_src) return bio; } -static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot, - struct skcipher_request **ciph_req_ret, - struct crypto_wait *wait) +static bool +blk_crypto_fallback_alloc_cipher_req(struct blk_ksm_keyslot *slot, + struct skcipher_request **ciph_req_ret, + struct crypto_wait *wait) { struct skcipher_request *ciph_req; - const struct blk_crypto_keyslot *slotp; + const struct blk_crypto_fallback_keyslot *slotp; int keyslot_idx = blk_ksm_get_slot_idx(slot); slotp = &blk_crypto_keyslots[keyslot_idx]; @@ -210,7 +211,7 @@ static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot, return true; } -static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr) +static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr) { struct bio *bio = *bio_ptr; unsigned int i = 0; @@ -277,7 +278,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr) blk_status_t blk_st; /* Split the bio if it's too big for single page bvec */ - if (!blk_crypto_split_bio_if_needed(bio_ptr)) + if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr)) return false; src_bio = *bio_ptr; @@ -285,7 +286,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr) data_unit_size = bc->bc_key->crypto_cfg.data_unit_size; /* Allocate bounce bio for encryption */ - enc_bio = blk_crypto_clone_bio(src_bio); + enc_bio = blk_crypto_fallback_clone_bio(src_bio); if (!enc_bio) { src_bio->bi_status = BLK_STS_RESOURCE; return false; @@ -302,7 +303,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr) } /* and then allocate an skcipher_request for it */ - if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) { + if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) { src_bio->bi_status = BLK_STS_RESOURCE; goto out_release_keyslot; } @@ -404,7 +405,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work) } /* and then allocate an skcipher_request for it */ - if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) { + if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) { bio->bi_status = BLK_STS_RESOURCE; goto out; } @@ -474,9 +475,9 @@ static void blk_crypto_fallback_decrypt_endio(struct bio *bio) * @bio_ptr: pointer to the bio to prepare * * If bio is doing a WRITE operation, this splits the bio into two parts if it's - * too big (see blk_crypto_split_bio_if_needed). It then allocates a bounce bio - * for the first part, encrypts it, and update bio_ptr to point to the bounce - * bio. + * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a + * bounce bio for the first part, encrypts it, and updates bio_ptr to point to + * the bounce bio. * * For a READ operation, we mark the bio for decryption by using bi_private and * bi_end_io. 
@@ -611,7 +612,7 @@ out: int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num) { const char *cipher_str = blk_crypto_modes[mode_num].cipher_str; - struct blk_crypto_keyslot *slotp; + struct blk_crypto_fallback_keyslot *slotp; unsigned int i; int err = 0; From 1e8d44bddf57f6d878e083f281a34d5c88feb7db Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 18 Oct 2021 11:04:51 -0700 Subject: [PATCH 149/174] blk-crypto: rename keyslot-manager files to blk-crypto-profile In preparation for renaming struct blk_keyslot_manager to struct blk_crypto_profile, rename the keyslot-manager.h and keyslot-manager.c source files. Renaming these files separately before making a lot of changes to their contents makes it easier for git to understand that they were renamed. Acked-by: Ulf Hansson # For MMC Reviewed-by: Christoph Hellwig Reviewed-by: Mike Snitzer Reviewed-by: Martin K. Petersen Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20211018180453.40441-3-ebiggers@kernel.org Signed-off-by: Jens Axboe --- block/Makefile | 2 +- block/blk-crypto-fallback.c | 2 +- block/{keyslot-manager.c => blk-crypto-profile.c} | 2 +- block/blk-crypto.c | 2 +- drivers/md/dm-core.h | 2 +- drivers/md/dm.c | 2 +- drivers/mmc/host/cqhci-crypto.c | 2 +- drivers/scsi/ufs/ufshcd.h | 2 +- include/linux/{keyslot-manager.h => blk-crypto-profile.h} | 0 include/linux/mmc/host.h | 2 +- 10 files changed, 9 insertions(+), 9 deletions(-) rename block/{keyslot-manager.c => blk-crypto-profile.c} (99%) rename include/linux/{keyslot-manager.h => blk-crypto-profile.h} (100%) diff --git a/block/Makefile b/block/Makefile index 74df168729ec..602f7f47b7b6 100644 --- a/block/Makefile +++ b/block/Makefile @@ -36,6 +36,6 @@ obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o obj-$(CONFIG_BLK_PM) += blk-pm.o -obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o blk-crypto.o +obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += blk-crypto.o blk-crypto-profile.o obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o obj-$(CONFIG_BLOCK_HOLDER_DEPRECATED) += holder.o diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index 1bcc1a151424..08bfea292c75 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -12,9 +12,9 @@ #include #include #include +#include #include #include -#include #include #include #include diff --git a/block/keyslot-manager.c b/block/blk-crypto-profile.c similarity index 99% rename from block/keyslot-manager.c rename to block/blk-crypto-profile.c index 1792159d12d1..1a235fa3c3e8 100644 --- a/block/keyslot-manager.c +++ b/block/blk-crypto-profile.c @@ -28,7 +28,7 @@ #define pr_fmt(fmt) "blk-crypto: " fmt -#include +#include #include #include #include diff --git a/block/blk-crypto.c b/block/blk-crypto.c index 8f53f4a1f9e2..76ce7a5d2676 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 55dccdfbcb22..841ed87999e7 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7870e6460633..4184fd8ccb08 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #define DM_MSG_PREFIX "core" diff --git a/drivers/mmc/host/cqhci-crypto.c 
b/drivers/mmc/host/cqhci-crypto.c index 6419cfbb4ab7..628bbfaf8312 100644 --- a/drivers/mmc/host/cqhci-crypto.c +++ b/drivers/mmc/host/cqhci-crypto.c @@ -6,7 +6,7 @@ */ #include -#include +#include #include #include "cqhci-crypto.h" diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 41f6e06f9185..885fcf2e5922 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include "unipro.h" #include diff --git a/include/linux/keyslot-manager.h b/include/linux/blk-crypto-profile.h similarity index 100% rename from include/linux/keyslot-manager.h rename to include/linux/blk-crypto-profile.h diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0c0c9a0fdf57..725b1de41767 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -15,7 +15,7 @@ #include #include #include -#include +#include struct mmc_ios { unsigned int clock; /* clock rate */ From cb77cb5abe1f4fae4a33b735606aae22f9eaa1c7 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 18 Oct 2021 11:04:52 -0700 Subject: [PATCH 150/174] blk-crypto: rename blk_keyslot_manager to blk_crypto_profile blk_keyslot_manager is misnamed because it doesn't necessarily manage keyslots. It actually does several different things: - Contains the crypto capabilities of the device. - Provides functions to control the inline encryption hardware. Originally these were just for programming/evicting keyslots; however, new functionality (hardware-wrapped keys) will require new functions here which are unrelated to keyslots. Moreover, device-mapper devices already (ab)use "keyslot_evict" to pass key eviction requests to their underlying devices even though device-mapper devices don't have any keyslots themselves (so it really should be "evict_key", not "keyslot_evict"). - Sometimes (but not always!) it manages keyslots. Originally it always did, but device-mapper devices don't have keyslots themselves, so they use a "passthrough keyslot manager" which doesn't actually manage keyslots. This hack works, but the terminology is unnatural. Also, some hardware doesn't have keyslots and thus also uses a "passthrough keyslot manager" (support for such hardware is yet to be upstreamed, but it will happen eventually). Let's stop having keyslot managers which don't actually manage keyslots. Instead, rename blk_keyslot_manager to blk_crypto_profile. This is a fairly big change, since for consistency it also has to update keyslot manager-related function names, variable names, and comments -- not just the actual struct name. However it's still a fairly straightforward change, as it doesn't change any actual functionality. Acked-by: Ulf Hansson # For MMC Reviewed-by: Mike Snitzer Reviewed-by: Martin K. 
Petersen Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20211018180453.40441-4-ebiggers@kernel.org Signed-off-by: Jens Axboe --- block/blk-crypto-fallback.c | 71 ++-- block/blk-crypto-profile.c | 520 ++++++++++++++--------------- block/blk-crypto.c | 27 +- block/blk-integrity.c | 4 +- drivers/md/dm-core.h | 2 +- drivers/md/dm-table.c | 168 +++++----- drivers/md/dm.c | 8 +- drivers/mmc/core/crypto.c | 11 +- drivers/mmc/host/cqhci-crypto.c | 31 +- drivers/scsi/ufs/ufshcd-crypto.c | 32 +- drivers/scsi/ufs/ufshcd-crypto.h | 9 +- drivers/scsi/ufs/ufshcd.c | 2 +- drivers/scsi/ufs/ufshcd.h | 4 +- include/linux/blk-crypto-profile.h | 166 +++++---- include/linux/blk-mq.h | 2 +- include/linux/blkdev.h | 16 +- include/linux/device-mapper.h | 4 +- include/linux/mmc/host.h | 2 +- 18 files changed, 556 insertions(+), 523 deletions(-) diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index 08bfea292c75..c87aba8584c6 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot { struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX]; } *blk_crypto_keyslots; -static struct blk_keyslot_manager blk_crypto_ksm; +static struct blk_crypto_profile blk_crypto_fallback_profile; static struct workqueue_struct *blk_crypto_wq; static mempool_t *blk_crypto_bounce_page_pool; static struct bio_set crypto_bio_split; @@ -104,9 +104,10 @@ static void blk_crypto_fallback_evict_keyslot(unsigned int slot) slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID; } -static int blk_crypto_fallback_keyslot_program(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key, - unsigned int slot) +static int +blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) { struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot]; const enum blk_crypto_mode_num crypto_mode = @@ -127,7 +128,7 @@ static int blk_crypto_fallback_keyslot_program(struct blk_keyslot_manager *ksm, return 0; } -static int blk_crypto_fallback_keyslot_evict(struct blk_keyslot_manager *ksm, +static int blk_crypto_fallback_keyslot_evict(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot) { @@ -135,14 +136,9 @@ static int blk_crypto_fallback_keyslot_evict(struct blk_keyslot_manager *ksm, return 0; } -/* - * The crypto API fallback KSM ops - only used for a bio when it specifies a - * blk_crypto_key that was not supported by the device's inline encryption - * hardware. 
- */ -static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = { - .keyslot_program = blk_crypto_fallback_keyslot_program, - .keyslot_evict = blk_crypto_fallback_keyslot_evict, +static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = { + .keyslot_program = blk_crypto_fallback_keyslot_program, + .keyslot_evict = blk_crypto_fallback_keyslot_evict, }; static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio) @@ -188,13 +184,13 @@ static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src) } static bool -blk_crypto_fallback_alloc_cipher_req(struct blk_ksm_keyslot *slot, +blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot, struct skcipher_request **ciph_req_ret, struct crypto_wait *wait) { struct skcipher_request *ciph_req; const struct blk_crypto_fallback_keyslot *slotp; - int keyslot_idx = blk_ksm_get_slot_idx(slot); + int keyslot_idx = blk_crypto_keyslot_index(slot); slotp = &blk_crypto_keyslots[keyslot_idx]; ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode], @@ -266,7 +262,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr) { struct bio *src_bio, *enc_bio; struct bio_crypt_ctx *bc; - struct blk_ksm_keyslot *slot; + struct blk_crypto_keyslot *slot; int data_unit_size; struct skcipher_request *ciph_req = NULL; DECLARE_CRYPTO_WAIT(wait); @@ -293,10 +289,11 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr) } /* - * Use the crypto API fallback keyslot manager to get a crypto_skcipher - * for the algorithm and key specified for this bio. + * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for + * this bio's algorithm and key. */ - blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot); + blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile, + bc->bc_key, &slot); if (blk_st != BLK_STS_OK) { src_bio->bi_status = blk_st; goto out_put_enc_bio; @@ -364,7 +361,7 @@ out_free_bounce_pages: out_free_ciph_req: skcipher_request_free(ciph_req); out_release_keyslot: - blk_ksm_put_slot(slot); + blk_crypto_put_keyslot(slot); out_put_enc_bio: if (enc_bio) bio_put(enc_bio); @@ -382,7 +379,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work) container_of(work, struct bio_fallback_crypt_ctx, work); struct bio *bio = f_ctx->bio; struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx; - struct blk_ksm_keyslot *slot; + struct blk_crypto_keyslot *slot; struct skcipher_request *ciph_req = NULL; DECLARE_CRYPTO_WAIT(wait); u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; @@ -395,10 +392,11 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work) blk_status_t blk_st; /* - * Use the crypto API fallback keyslot manager to get a crypto_skcipher - * for the algorithm and key specified for this bio. + * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for + * this bio's algorithm and key. 
*/ - blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot); + blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile, + bc->bc_key, &slot); if (blk_st != BLK_STS_OK) { bio->bi_status = blk_st; goto out_no_keyslot; @@ -436,7 +434,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work) out: skcipher_request_free(ciph_req); - blk_ksm_put_slot(slot); + blk_crypto_put_keyslot(slot); out_no_keyslot: mempool_free(f_ctx, bio_fallback_crypt_ctx_pool); bio_endio(bio); @@ -501,8 +499,8 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr) return false; } - if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm, - &bc->bc_key->crypto_cfg)) { + if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile, + &bc->bc_key->crypto_cfg)) { bio->bi_status = BLK_STS_NOTSUPP; return false; } @@ -528,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr) int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) { - return blk_ksm_evict_key(&blk_crypto_ksm, key); + return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key); } static bool blk_crypto_fallback_inited; @@ -536,6 +534,7 @@ static int blk_crypto_fallback_init(void) { int i; int err; + struct blk_crypto_profile *profile = &blk_crypto_fallback_profile; if (blk_crypto_fallback_inited) return 0; @@ -546,24 +545,24 @@ static int blk_crypto_fallback_init(void) if (err) goto out; - err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots); + err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots); if (err) goto fail_free_bioset; err = -ENOMEM; - blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops; - blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; + profile->ll_ops = blk_crypto_fallback_ll_ops; + profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; /* All blk-crypto modes have a crypto API fallback. */ for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) - blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF; - blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; + profile->modes_supported[i] = 0xFFFFFFFF; + profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; blk_crypto_wq = alloc_workqueue("blk_crypto_wq", WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, num_online_cpus()); if (!blk_crypto_wq) - goto fail_free_ksm; + goto fail_destroy_profile; blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots, sizeof(blk_crypto_keyslots[0]), @@ -597,8 +596,8 @@ fail_free_keyslots: kfree(blk_crypto_keyslots); fail_free_wq: destroy_workqueue(blk_crypto_wq); -fail_free_ksm: - blk_ksm_destroy(&blk_crypto_ksm); +fail_destroy_profile: + blk_crypto_profile_destroy(profile); fail_free_bioset: bioset_exit(&crypto_bio_split); out: diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c index 1a235fa3c3e8..605ba0626a5c 100644 --- a/block/blk-crypto-profile.c +++ b/block/blk-crypto-profile.c @@ -4,26 +4,22 @@ */ /** - * DOC: The Keyslot Manager + * DOC: blk-crypto profiles * - * Many devices with inline encryption support have a limited number of "slots" - * into which encryption contexts may be programmed, and requests can be tagged - * with a slot number to specify the key to use for en/decryption. + * 'struct blk_crypto_profile' contains all generic inline encryption-related + * state for a particular inline encryption device. blk_crypto_profile serves + * as the way that drivers for inline encryption hardware expose their crypto + * capabilities and certain functions (e.g., functions to program and evict + * keys) to upper layers. 
Device drivers that want to support inline encryption + * construct a crypto profile, then associate it with the disk's request_queue. * - * As the number of slots is limited, and programming keys is expensive on - * many inline encryption hardware, we don't want to program the same key into - * multiple slots - if multiple requests are using the same key, we want to - * program just one slot with that key and use that slot for all requests. + * If the device has keyslots, then its blk_crypto_profile also handles managing + * these keyslots in a device-independent way, using the driver-provided + * functions to program and evict keys as needed. This includes keeping track + * of which key and how many I/O requests are using each keyslot, getting + * keyslots for I/O requests, and handling key eviction requests. * - * The keyslot manager manages these keyslots appropriately, and also acts as - * an abstraction between the inline encryption hardware and the upper layers. - * - * Lower layer devices will set up a keyslot manager in their request queue - * and tell it how to perform device specific operations like programming/ - * evicting keys from keyslots. - * - * Upper layers will call blk_ksm_get_slot_for_key() to program a - * key into some slot in the inline encryption hardware. + * For more information, see Documentation/block/inline-encryption.rst. */ #define pr_fmt(fmt) "blk-crypto: " fmt @@ -37,77 +33,75 @@ #include #include -struct blk_ksm_keyslot { +struct blk_crypto_keyslot { atomic_t slot_refs; struct list_head idle_slot_node; struct hlist_node hash_node; const struct blk_crypto_key *key; - struct blk_keyslot_manager *ksm; + struct blk_crypto_profile *profile; }; -static inline void blk_ksm_hw_enter(struct blk_keyslot_manager *ksm) +static inline void blk_crypto_hw_enter(struct blk_crypto_profile *profile) { /* - * Calling into the driver requires ksm->lock held and the device + * Calling into the driver requires profile->lock held and the device * resumed. But we must resume the device first, since that can acquire - * and release ksm->lock via blk_ksm_reprogram_all_keys(). + * and release profile->lock via blk_crypto_reprogram_all_keys(). */ - if (ksm->dev) - pm_runtime_get_sync(ksm->dev); - down_write(&ksm->lock); + if (profile->dev) + pm_runtime_get_sync(profile->dev); + down_write(&profile->lock); } -static inline void blk_ksm_hw_exit(struct blk_keyslot_manager *ksm) +static inline void blk_crypto_hw_exit(struct blk_crypto_profile *profile) { - up_write(&ksm->lock); - if (ksm->dev) - pm_runtime_put_sync(ksm->dev); -} - -static inline bool blk_ksm_is_passthrough(struct blk_keyslot_manager *ksm) -{ - return ksm->num_slots == 0; + up_write(&profile->lock); + if (profile->dev) + pm_runtime_put_sync(profile->dev); } /** - * blk_ksm_init() - Initialize a keyslot manager - * @ksm: The keyslot_manager to initialize. - * @num_slots: The number of key slots to manage. + * blk_crypto_profile_init() - Initialize a blk_crypto_profile + * @profile: the blk_crypto_profile to initialize + * @num_slots: the number of keyslots * - * Allocate memory for keyslots and initialize a keyslot manager. Called by - * e.g. storage drivers to set up a keyslot manager in their request_queue. + * Storage drivers must call this when starting to set up a blk_crypto_profile, + * before filling in additional fields. * * Return: 0 on success, or else a negative error code. 
*/ -int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots) +int blk_crypto_profile_init(struct blk_crypto_profile *profile, + unsigned int num_slots) { unsigned int slot; unsigned int i; unsigned int slot_hashtable_size; - memset(ksm, 0, sizeof(*ksm)); + memset(profile, 0, sizeof(*profile)); + init_rwsem(&profile->lock); if (num_slots == 0) - return -EINVAL; + return 0; - ksm->slots = kvcalloc(num_slots, sizeof(ksm->slots[0]), GFP_KERNEL); - if (!ksm->slots) + /* Initialize keyslot management data. */ + + profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]), + GFP_KERNEL); + if (!profile->slots) return -ENOMEM; - ksm->num_slots = num_slots; + profile->num_slots = num_slots; - init_rwsem(&ksm->lock); - - init_waitqueue_head(&ksm->idle_slots_wait_queue); - INIT_LIST_HEAD(&ksm->idle_slots); + init_waitqueue_head(&profile->idle_slots_wait_queue); + INIT_LIST_HEAD(&profile->idle_slots); for (slot = 0; slot < num_slots; slot++) { - ksm->slots[slot].ksm = ksm; - list_add_tail(&ksm->slots[slot].idle_slot_node, - &ksm->idle_slots); + profile->slots[slot].profile = profile; + list_add_tail(&profile->slots[slot].idle_slot_node, + &profile->idle_slots); } - spin_lock_init(&ksm->idle_slots_lock); + spin_lock_init(&profile->idle_slots_lock); slot_hashtable_size = roundup_pow_of_two(num_slots); /* @@ -117,74 +111,80 @@ int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots) if (slot_hashtable_size < 2) slot_hashtable_size = 2; - ksm->log_slot_ht_size = ilog2(slot_hashtable_size); - ksm->slot_hashtable = kvmalloc_array(slot_hashtable_size, - sizeof(ksm->slot_hashtable[0]), - GFP_KERNEL); - if (!ksm->slot_hashtable) - goto err_destroy_ksm; + profile->log_slot_ht_size = ilog2(slot_hashtable_size); + profile->slot_hashtable = + kvmalloc_array(slot_hashtable_size, + sizeof(profile->slot_hashtable[0]), GFP_KERNEL); + if (!profile->slot_hashtable) + goto err_destroy; for (i = 0; i < slot_hashtable_size; i++) - INIT_HLIST_HEAD(&ksm->slot_hashtable[i]); + INIT_HLIST_HEAD(&profile->slot_hashtable[i]); return 0; -err_destroy_ksm: - blk_ksm_destroy(ksm); +err_destroy: + blk_crypto_profile_destroy(profile); return -ENOMEM; } -EXPORT_SYMBOL_GPL(blk_ksm_init); +EXPORT_SYMBOL_GPL(blk_crypto_profile_init); -static void blk_ksm_destroy_callback(void *ksm) +static void blk_crypto_profile_destroy_callback(void *profile) { - blk_ksm_destroy(ksm); + blk_crypto_profile_destroy(profile); } /** - * devm_blk_ksm_init() - Resource-managed blk_ksm_init() - * @dev: The device which owns the blk_keyslot_manager. - * @ksm: The blk_keyslot_manager to initialize. - * @num_slots: The number of key slots to manage. + * devm_blk_crypto_profile_init() - Resource-managed blk_crypto_profile_init() + * @dev: the device which owns the blk_crypto_profile + * @profile: the blk_crypto_profile to initialize + * @num_slots: the number of keyslots * - * Like blk_ksm_init(), but causes blk_ksm_destroy() to be called automatically - * on driver detach. + * Like blk_crypto_profile_init(), but causes blk_crypto_profile_destroy() to be + * called automatically on driver detach. * * Return: 0 on success, or else a negative error code. 
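For orientation, a minimal sketch (not part of this series) of how a driver with real keyslots sets up the renamed structures; my_keyslot_program(), my_keyslot_evict() and my_init_crypto() are hypothetical stand-ins for driver specifics:

#include <linux/blk-crypto-profile.h>

/* Hypothetical hardware callbacks; both are serialized by profile->lock. */
static int my_keyslot_program(struct blk_crypto_profile *profile,
			      const struct blk_crypto_key *key,
			      unsigned int slot)
{
	/* write @key into hardware keyslot @slot here */
	return 0;
}

static int my_keyslot_evict(struct blk_crypto_profile *profile,
			    const struct blk_crypto_key *key,
			    unsigned int slot)
{
	/* zero out hardware keyslot @slot here */
	return 0;
}

static const struct blk_crypto_ll_ops my_crypto_ops = {
	.keyslot_program	= my_keyslot_program,
	.keyslot_evict		= my_keyslot_evict,
};

static int my_init_crypto(struct device *dev, struct request_queue *q,
			  struct blk_crypto_profile *profile,
			  unsigned int num_keyslots)
{
	/* Keyslot bookkeeping is allocated here and freed on driver detach. */
	int err = devm_blk_crypto_profile_init(dev, profile, num_keyslots);

	if (err)
		return err;

	profile->ll_ops = my_crypto_ops;
	profile->dev = dev;	/* runtime-resumed around ll_ops calls */
	profile->max_dun_bytes_supported = 8;
	/* Advertise AES-256-XTS with 512- and 4096-byte data unit sizes. */
	profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] = 512 | 4096;

	return blk_crypto_register(profile, q) ? 0 : -EINVAL;
}

Compare the drivers/mmc/host/cqhci-crypto.c and drivers/scsi/ufs/ufshcd-crypto.c hunks below for the real equivalents.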
*/ -int devm_blk_ksm_init(struct device *dev, struct blk_keyslot_manager *ksm, - unsigned int num_slots) +int devm_blk_crypto_profile_init(struct device *dev, + struct blk_crypto_profile *profile, + unsigned int num_slots) { - int err = blk_ksm_init(ksm, num_slots); + int err = blk_crypto_profile_init(profile, num_slots); if (err) return err; - return devm_add_action_or_reset(dev, blk_ksm_destroy_callback, ksm); + return devm_add_action_or_reset(dev, + blk_crypto_profile_destroy_callback, + profile); } -EXPORT_SYMBOL_GPL(devm_blk_ksm_init); +EXPORT_SYMBOL_GPL(devm_blk_crypto_profile_init); static inline struct hlist_head * -blk_ksm_hash_bucket_for_key(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key) +blk_crypto_hash_bucket_for_key(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key) { - return &ksm->slot_hashtable[hash_ptr(key, ksm->log_slot_ht_size)]; + return &profile->slot_hashtable[ + hash_ptr(key, profile->log_slot_ht_size)]; } -static void blk_ksm_remove_slot_from_lru_list(struct blk_ksm_keyslot *slot) +static void +blk_crypto_remove_slot_from_lru_list(struct blk_crypto_keyslot *slot) { - struct blk_keyslot_manager *ksm = slot->ksm; + struct blk_crypto_profile *profile = slot->profile; unsigned long flags; - spin_lock_irqsave(&ksm->idle_slots_lock, flags); + spin_lock_irqsave(&profile->idle_slots_lock, flags); list_del(&slot->idle_slot_node); - spin_unlock_irqrestore(&ksm->idle_slots_lock, flags); + spin_unlock_irqrestore(&profile->idle_slots_lock, flags); } -static struct blk_ksm_keyslot *blk_ksm_find_keyslot( - struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key) +static struct blk_crypto_keyslot * +blk_crypto_find_keyslot(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key) { - const struct hlist_head *head = blk_ksm_hash_bucket_for_key(ksm, key); - struct blk_ksm_keyslot *slotp; + const struct hlist_head *head = + blk_crypto_hash_bucket_for_key(profile, key); + struct blk_crypto_keyslot *slotp; hlist_for_each_entry(slotp, head, hash_node) { if (slotp->key == key) @@ -193,68 +193,79 @@ static struct blk_ksm_keyslot *blk_ksm_find_keyslot( return NULL; } -static struct blk_ksm_keyslot *blk_ksm_find_and_grab_keyslot( - struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key) +static struct blk_crypto_keyslot * +blk_crypto_find_and_grab_keyslot(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key) { - struct blk_ksm_keyslot *slot; + struct blk_crypto_keyslot *slot; - slot = blk_ksm_find_keyslot(ksm, key); + slot = blk_crypto_find_keyslot(profile, key); if (!slot) return NULL; if (atomic_inc_return(&slot->slot_refs) == 1) { /* Took first reference to this slot; remove it from LRU list */ - blk_ksm_remove_slot_from_lru_list(slot); + blk_crypto_remove_slot_from_lru_list(slot); } return slot; } -unsigned int blk_ksm_get_slot_idx(struct blk_ksm_keyslot *slot) +/** + * blk_crypto_keyslot_index() - Get the index of a keyslot + * @slot: a keyslot that blk_crypto_get_keyslot() returned + * + * Return: the 0-based index of the keyslot within the device's keyslots. + */ +unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot) { - return slot - slot->ksm->slots; + return slot - slot->profile->slots; } -EXPORT_SYMBOL_GPL(blk_ksm_get_slot_idx); +EXPORT_SYMBOL_GPL(blk_crypto_keyslot_index); /** - * blk_ksm_get_slot_for_key() - Program a key into a keyslot. - * @ksm: The keyslot manager to program the key into. 
- * @key: Pointer to the key object to program, including the raw key, crypto - * mode, and data unit size. - * @slot_ptr: A pointer to return the pointer of the allocated keyslot. + * blk_crypto_get_keyslot() - Get a keyslot for a key, if needed. + * @profile: the crypto profile of the device the key will be used on + * @key: the key that will be used + * @slot_ptr: If a keyslot is allocated, an opaque pointer to the keyslot struct + * will be stored here; otherwise NULL will be stored here. * - * Get a keyslot that's been programmed with the specified key. If one already - * exists, return it with incremented refcount. Otherwise, wait for a keyslot - * to become idle and program it. + * If the device has keyslots, this gets a keyslot that's been programmed with + * the specified key. If the key is already in a slot, this reuses it; + * otherwise this waits for a slot to become idle and programs the key into it. * - * Context: Process context. Takes and releases ksm->lock. - * Return: BLK_STS_OK on success (and keyslot is set to the pointer of the - * allocated keyslot), or some other blk_status_t otherwise (and - * keyslot is set to NULL). + * This must be paired with a call to blk_crypto_put_keyslot(). + * + * Context: Process context. Takes and releases profile->lock. + * Return: BLK_STS_OK on success, meaning that either a keyslot was allocated or + * one wasn't needed; or a blk_status_t error on failure. */ -blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key, - struct blk_ksm_keyslot **slot_ptr) +blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + struct blk_crypto_keyslot **slot_ptr) { - struct blk_ksm_keyslot *slot; + struct blk_crypto_keyslot *slot; int slot_idx; int err; *slot_ptr = NULL; - if (blk_ksm_is_passthrough(ksm)) + /* + * If the device has no concept of "keyslots", then there is no need to + * get one. + */ + if (profile->num_slots == 0) return BLK_STS_OK; - down_read(&ksm->lock); - slot = blk_ksm_find_and_grab_keyslot(ksm, key); - up_read(&ksm->lock); + down_read(&profile->lock); + slot = blk_crypto_find_and_grab_keyslot(profile, key); + up_read(&profile->lock); if (slot) goto success; for (;;) { - blk_ksm_hw_enter(ksm); - slot = blk_ksm_find_and_grab_keyslot(ksm, key); + blk_crypto_hw_enter(profile); + slot = blk_crypto_find_and_grab_keyslot(profile, key); if (slot) { - blk_ksm_hw_exit(ksm); + blk_crypto_hw_exit(profile); goto success; } @@ -262,22 +273,22 @@ blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm, * If we're here, that means there wasn't a slot that was * already programmed with the key. So try to program it. 
*/ - if (!list_empty(&ksm->idle_slots)) + if (!list_empty(&profile->idle_slots)) break; - blk_ksm_hw_exit(ksm); - wait_event(ksm->idle_slots_wait_queue, - !list_empty(&ksm->idle_slots)); + blk_crypto_hw_exit(profile); + wait_event(profile->idle_slots_wait_queue, + !list_empty(&profile->idle_slots)); } - slot = list_first_entry(&ksm->idle_slots, struct blk_ksm_keyslot, + slot = list_first_entry(&profile->idle_slots, struct blk_crypto_keyslot, idle_slot_node); - slot_idx = blk_ksm_get_slot_idx(slot); + slot_idx = blk_crypto_keyslot_index(slot); - err = ksm->ksm_ll_ops.keyslot_program(ksm, key, slot_idx); + err = profile->ll_ops.keyslot_program(profile, key, slot_idx); if (err) { - wake_up(&ksm->idle_slots_wait_queue); - blk_ksm_hw_exit(ksm); + wake_up(&profile->idle_slots_wait_queue); + blk_crypto_hw_exit(profile); return errno_to_blk_status(err); } @@ -285,97 +296,98 @@ blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm, if (slot->key) hlist_del(&slot->hash_node); slot->key = key; - hlist_add_head(&slot->hash_node, blk_ksm_hash_bucket_for_key(ksm, key)); + hlist_add_head(&slot->hash_node, + blk_crypto_hash_bucket_for_key(profile, key)); atomic_set(&slot->slot_refs, 1); - blk_ksm_remove_slot_from_lru_list(slot); + blk_crypto_remove_slot_from_lru_list(slot); - blk_ksm_hw_exit(ksm); + blk_crypto_hw_exit(profile); success: *slot_ptr = slot; return BLK_STS_OK; } /** - * blk_ksm_put_slot() - Release a reference to a slot - * @slot: The keyslot to release the reference of. + * blk_crypto_put_keyslot() - Release a reference to a keyslot + * @slot: The keyslot to release the reference of (may be NULL). * * Context: Any context. */ -void blk_ksm_put_slot(struct blk_ksm_keyslot *slot) +void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot) { - struct blk_keyslot_manager *ksm; + struct blk_crypto_profile *profile; unsigned long flags; if (!slot) return; - ksm = slot->ksm; + profile = slot->profile; if (atomic_dec_and_lock_irqsave(&slot->slot_refs, - &ksm->idle_slots_lock, flags)) { - list_add_tail(&slot->idle_slot_node, &ksm->idle_slots); - spin_unlock_irqrestore(&ksm->idle_slots_lock, flags); - wake_up(&ksm->idle_slots_wait_queue); + &profile->idle_slots_lock, flags)) { + list_add_tail(&slot->idle_slot_node, &profile->idle_slots); + spin_unlock_irqrestore(&profile->idle_slots_lock, flags); + wake_up(&profile->idle_slots_wait_queue); } } /** - * blk_ksm_crypto_cfg_supported() - Find out if a crypto configuration is - * supported by a ksm. - * @ksm: The keyslot manager to check - * @cfg: The crypto configuration to check for. + * __blk_crypto_cfg_supported() - Check whether the given crypto profile + * supports the given crypto configuration. + * @profile: the crypto profile to check + * @cfg: the crypto configuration to check for * - * Checks for crypto_mode/data unit size/dun bytes support. - * - * Return: Whether or not this ksm supports the specified crypto config. + * Return: %true if @profile supports the given @cfg. 
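As a usage note, the new get/put pair composes the same way the old blk_ksm_get_slot_for_key()/blk_ksm_put_slot() pair did; this is roughly what __blk_crypto_init_request() and __blk_crypto_free_request() do in the block/blk-crypto.c hunk below, sketched here with a hypothetical hw_desc:

	struct blk_crypto_keyslot *slot;
	blk_status_t status;

	status = blk_crypto_get_keyslot(profile, key, &slot);
	if (status != BLK_STS_OK)
		return status;

	/*
	 * slot is NULL for keyslot-less profiles (num_slots == 0); otherwise
	 * the key is programmed and the slot stays pinned until the put.
	 */
	if (slot)
		hw_desc->keyslot = blk_crypto_keyslot_index(slot);

	/* ... submit the I/O ... */

	blk_crypto_put_keyslot(slot);	/* accepts NULL */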
*/ -bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm, - const struct blk_crypto_config *cfg) +bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile, + const struct blk_crypto_config *cfg) { - if (!ksm) + if (!profile) return false; - if (!(ksm->crypto_modes_supported[cfg->crypto_mode] & - cfg->data_unit_size)) + if (!(profile->modes_supported[cfg->crypto_mode] & cfg->data_unit_size)) return false; - if (ksm->max_dun_bytes_supported < cfg->dun_bytes) + if (profile->max_dun_bytes_supported < cfg->dun_bytes) return false; return true; } /** - * blk_ksm_evict_key() - Evict a key from the lower layer device. - * @ksm: The keyslot manager to evict from - * @key: The key to evict + * __blk_crypto_evict_key() - Evict a key from a device. + * @profile: the crypto profile of the device + * @key: the key to evict. It must not still be used in any I/O. * - * Find the keyslot that the specified key was programmed into, and evict that - * slot from the lower layer device. The slot must not be in use by any - * in-flight IO when this function is called. + * If the device has keyslots, this finds the keyslot (if any) that contains the + * specified key and calls the driver's keyslot_evict function to evict it. * - * Context: Process context. Takes and releases ksm->lock. + * Otherwise, this just calls the driver's keyslot_evict function if it is + * implemented, passing just the key (without any particular keyslot). This + * allows layered devices to evict the key from their underlying devices. + * + * Context: Process context. Takes and releases profile->lock. * Return: 0 on success or if there's no keyslot with the specified key, -EBUSY * if the keyslot is still in use, or another -errno value on other * error. */ -int blk_ksm_evict_key(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key) +int __blk_crypto_evict_key(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key) { - struct blk_ksm_keyslot *slot; + struct blk_crypto_keyslot *slot; int err = 0; - if (blk_ksm_is_passthrough(ksm)) { - if (ksm->ksm_ll_ops.keyslot_evict) { - blk_ksm_hw_enter(ksm); - err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, -1); - blk_ksm_hw_exit(ksm); + if (profile->num_slots == 0) { + if (profile->ll_ops.keyslot_evict) { + blk_crypto_hw_enter(profile); + err = profile->ll_ops.keyslot_evict(profile, key, -1); + blk_crypto_hw_exit(profile); return err; } return 0; } - blk_ksm_hw_enter(ksm); - slot = blk_ksm_find_keyslot(ksm, key); + blk_crypto_hw_enter(profile); + slot = blk_crypto_find_keyslot(profile, key); if (!slot) goto out_unlock; @@ -383,8 +395,8 @@ int blk_ksm_evict_key(struct blk_keyslot_manager *ksm, err = -EBUSY; goto out_unlock; } - err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, - blk_ksm_get_slot_idx(slot)); + err = profile->ll_ops.keyslot_evict(profile, key, + blk_crypto_keyslot_index(slot)); if (err) goto out_unlock; @@ -392,81 +404,84 @@ int blk_ksm_evict_key(struct blk_keyslot_manager *ksm, slot->key = NULL; err = 0; out_unlock: - blk_ksm_hw_exit(ksm); + blk_crypto_hw_exit(profile); return err; } /** - * blk_ksm_reprogram_all_keys() - Re-program all keyslots. - * @ksm: The keyslot manager + * blk_crypto_reprogram_all_keys() - Re-program all keyslots. + * @profile: The crypto profile * * Re-program all keyslots that are supposed to have a key programmed. This is * intended only for use by drivers for hardware that loses its keys on reset. * - * Context: Process context. Takes and releases ksm->lock. + * Context: Process context. 
Takes and releases profile->lock. */ -void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm) +void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile) { unsigned int slot; - if (blk_ksm_is_passthrough(ksm)) + if (profile->num_slots == 0) return; /* This is for device initialization, so don't resume the device */ - down_write(&ksm->lock); - for (slot = 0; slot < ksm->num_slots; slot++) { - const struct blk_crypto_key *key = ksm->slots[slot].key; + down_write(&profile->lock); + for (slot = 0; slot < profile->num_slots; slot++) { + const struct blk_crypto_key *key = profile->slots[slot].key; int err; if (!key) continue; - err = ksm->ksm_ll_ops.keyslot_program(ksm, key, slot); + err = profile->ll_ops.keyslot_program(profile, key, slot); WARN_ON(err); } - up_write(&ksm->lock); + up_write(&profile->lock); } -EXPORT_SYMBOL_GPL(blk_ksm_reprogram_all_keys); +EXPORT_SYMBOL_GPL(blk_crypto_reprogram_all_keys); -void blk_ksm_destroy(struct blk_keyslot_manager *ksm) +void blk_crypto_profile_destroy(struct blk_crypto_profile *profile) { - if (!ksm) + if (!profile) return; - kvfree(ksm->slot_hashtable); - kvfree_sensitive(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots); - memzero_explicit(ksm, sizeof(*ksm)); + kvfree(profile->slot_hashtable); + kvfree_sensitive(profile->slots, + sizeof(profile->slots[0]) * profile->num_slots); + memzero_explicit(profile, sizeof(*profile)); } -EXPORT_SYMBOL_GPL(blk_ksm_destroy); +EXPORT_SYMBOL_GPL(blk_crypto_profile_destroy); -bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q) +bool blk_crypto_register(struct blk_crypto_profile *profile, + struct request_queue *q) { if (blk_integrity_queue_supports_integrity(q)) { pr_warn("Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n"); return false; } - q->ksm = ksm; + q->crypto_profile = profile; return true; } -EXPORT_SYMBOL_GPL(blk_ksm_register); +EXPORT_SYMBOL_GPL(blk_crypto_register); -void blk_ksm_unregister(struct request_queue *q) +void blk_crypto_unregister(struct request_queue *q) { - q->ksm = NULL; + q->crypto_profile = NULL; } /** - * blk_ksm_intersect_modes() - restrict supported modes by child device - * @parent: The keyslot manager for parent device - * @child: The keyslot manager for child device, or NULL + * blk_crypto_intersect_capabilities() - restrict supported crypto capabilities + * by child device + * @parent: the crypto profile for the parent device + * @child: the crypto profile for the child device, or NULL * - * Clear any crypto mode support bits in @parent that aren't set in @child. - * If @child is NULL, then all parent bits are cleared. + * This clears all crypto capabilities in @parent that aren't set in @child. If + * @child is NULL, then this clears all parent capabilities. * - * Only use this when setting up the keyslot manager for a layered device, - * before it's been exposed yet. + * Only use this when setting up the crypto profile for a layered device, before + * it's been exposed yet. 
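Layered drivers use the same struct without keyslots, as device-mapper does later in this patch; a condensed sketch of that shape (error handling elided; for_each_underlying_queue() and my_evict() are hypothetical):

	struct blk_crypto_profile *profile = kmalloc(sizeof(*profile), GFP_KERNEL);

	/* num_slots == 0: capabilities and an evict hook, no keyslot state. */
	blk_crypto_profile_init(profile, 0);
	profile->ll_ops.keyslot_evict = my_evict;
	profile->max_dun_bytes_supported = UINT_MAX;
	memset(profile->modes_supported, 0xFF, sizeof(profile->modes_supported));

	/* Narrow to the intersection of every underlying device. */
	for_each_underlying_queue(q)
		blk_crypto_intersect_capabilities(profile, q->crypto_profile);

	/* Never shrink what the exposed queue already advertised. */
	if (!blk_crypto_has_capabilities(profile, exposed_q->crypto_profile))
		return -EINVAL;

The drivers/md/dm-table.c hunks below show the full version of this pattern.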
*/ -void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent, - const struct blk_keyslot_manager *child) +void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent, + const struct blk_crypto_profile *child) { if (child) { unsigned int i; @@ -474,73 +489,63 @@ void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent, parent->max_dun_bytes_supported = min(parent->max_dun_bytes_supported, child->max_dun_bytes_supported); - for (i = 0; i < ARRAY_SIZE(child->crypto_modes_supported); - i++) { - parent->crypto_modes_supported[i] &= - child->crypto_modes_supported[i]; - } + for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++) + parent->modes_supported[i] &= child->modes_supported[i]; } else { parent->max_dun_bytes_supported = 0; - memset(parent->crypto_modes_supported, 0, - sizeof(parent->crypto_modes_supported)); + memset(parent->modes_supported, 0, + sizeof(parent->modes_supported)); } } -EXPORT_SYMBOL_GPL(blk_ksm_intersect_modes); +EXPORT_SYMBOL_GPL(blk_crypto_intersect_capabilities); /** - * blk_ksm_is_superset() - Check if a KSM supports a superset of crypto modes - * and DUN bytes that another KSM supports. Here, - * "superset" refers to the mathematical meaning of the - * word - i.e. if two KSMs have the *same* capabilities, - * they *are* considered supersets of each other. - * @ksm_superset: The KSM that we want to verify is a superset - * @ksm_subset: The KSM that we want to verify is a subset + * blk_crypto_has_capabilities() - Check whether @target supports at least all + * the crypto capabilities that @reference does. + * @target: the target profile + * @reference: the reference profile * - * Return: True if @ksm_superset supports a superset of the crypto modes and DUN - * bytes that @ksm_subset supports. + * Return: %true if @target supports all the crypto capabilities of @reference. */ -bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset, - struct blk_keyslot_manager *ksm_subset) +bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target, + const struct blk_crypto_profile *reference) { int i; - if (!ksm_subset) + if (!reference) return true; - if (!ksm_superset) + if (!target) return false; - for (i = 0; i < ARRAY_SIZE(ksm_superset->crypto_modes_supported); i++) { - if (ksm_subset->crypto_modes_supported[i] & - (~ksm_superset->crypto_modes_supported[i])) { + for (i = 0; i < ARRAY_SIZE(target->modes_supported); i++) { + if (reference->modes_supported[i] & ~target->modes_supported[i]) return false; - } } - if (ksm_subset->max_dun_bytes_supported > - ksm_superset->max_dun_bytes_supported) { + if (reference->max_dun_bytes_supported > + target->max_dun_bytes_supported) return false; - } return true; } -EXPORT_SYMBOL_GPL(blk_ksm_is_superset); +EXPORT_SYMBOL_GPL(blk_crypto_has_capabilities); /** - * blk_ksm_update_capabilities() - Update the restrictions of a KSM to those of - * another KSM - * @target_ksm: The KSM whose restrictions to update. - * @reference_ksm: The KSM to whose restrictions this function will update - * @target_ksm's restrictions to. + * blk_crypto_update_capabilities() - Update the capabilities of a crypto + * profile to match those of another crypto + * profile. + * @dst: The crypto profile whose capabilities to update. + * @src: The crypto profile whose capabilities this function will update @dst's + * capabilities to. * * Blk-crypto requires that crypto capabilities that were * advertised when a bio was created continue to be supported by the * device until that bio is ended. 
This is turn means that a device cannot * shrink its advertised crypto capabilities without any explicit * synchronization with upper layers. So if there's no such explicit - * synchronization, @reference_ksm must support all the crypto capabilities that - * @target_ksm does - * (i.e. we need blk_ksm_is_superset(@reference_ksm, @target_ksm) == true). + * synchronization, @src must support all the crypto capabilities that + * @dst does (i.e. we need blk_crypto_has_capabilities(@src, @dst)). * * Note also that as long as the crypto capabilities are being expanded, the * order of updates becoming visible is not important because it's alright @@ -549,31 +554,12 @@ EXPORT_SYMBOL_GPL(blk_ksm_is_superset); * might result in blk-crypto-fallback being used if available, or the bio being * failed). */ -void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm, - struct blk_keyslot_manager *reference_ksm) +void blk_crypto_update_capabilities(struct blk_crypto_profile *dst, + const struct blk_crypto_profile *src) { - memcpy(target_ksm->crypto_modes_supported, - reference_ksm->crypto_modes_supported, - sizeof(target_ksm->crypto_modes_supported)); + memcpy(dst->modes_supported, src->modes_supported, + sizeof(dst->modes_supported)); - target_ksm->max_dun_bytes_supported = - reference_ksm->max_dun_bytes_supported; + dst->max_dun_bytes_supported = src->max_dun_bytes_supported; } -EXPORT_SYMBOL_GPL(blk_ksm_update_capabilities); - -/** - * blk_ksm_init_passthrough() - Init a passthrough keyslot manager - * @ksm: The keyslot manager to init - * - * Initialize a passthrough keyslot manager. - * Called by e.g. storage drivers to set up a keyslot manager in their - * request_queue, when the storage driver wants to manage its keys by itself. - * This is useful for inline encryption hardware that doesn't have the concept - * of keyslots, and for layered devices. - */ -void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm) -{ - memset(ksm, 0, sizeof(*ksm)); - init_rwsem(&ksm->lock); -} -EXPORT_SYMBOL_GPL(blk_ksm_init_passthrough); +EXPORT_SYMBOL_GPL(blk_crypto_update_capabilities); diff --git a/block/blk-crypto.c b/block/blk-crypto.c index 76ce7a5d2676..ec9efeeeca91 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -218,8 +218,9 @@ static bool bio_crypt_check_alignment(struct bio *bio) blk_status_t __blk_crypto_init_request(struct request *rq) { - return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key, - &rq->crypt_keyslot); + return blk_crypto_get_keyslot(rq->q->crypto_profile, + rq->crypt_ctx->bc_key, + &rq->crypt_keyslot); } /** @@ -233,7 +234,7 @@ blk_status_t __blk_crypto_init_request(struct request *rq) */ void __blk_crypto_free_request(struct request *rq) { - blk_ksm_put_slot(rq->crypt_keyslot); + blk_crypto_put_keyslot(rq->crypt_keyslot); mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool); blk_crypto_rq_set_defaults(rq); } @@ -264,6 +265,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr) { struct bio *bio = *bio_ptr; const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key; + struct blk_crypto_profile *profile; /* Error if bio has no data. */ if (WARN_ON_ONCE(!bio_has_data(bio))) { @@ -280,8 +282,8 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr) * Success if device supports the encryption context, or if we succeeded * in falling back to the crypto API. 
*/ - if (blk_ksm_crypto_cfg_supported(bdev_get_queue(bio->bi_bdev)->ksm, - &bc_key->crypto_cfg)) + profile = bdev_get_queue(bio->bi_bdev)->crypto_profile; + if (__blk_crypto_cfg_supported(profile, &bc_key->crypto_cfg)) return true; if (blk_crypto_fallback_bio_prep(bio_ptr)) @@ -357,7 +359,7 @@ bool blk_crypto_config_supported(struct request_queue *q, const struct blk_crypto_config *cfg) { return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) || - blk_ksm_crypto_cfg_supported(q->ksm, cfg); + __blk_crypto_cfg_supported(q->crypto_profile, cfg); } /** @@ -378,7 +380,7 @@ bool blk_crypto_config_supported(struct request_queue *q, int blk_crypto_start_using_key(const struct blk_crypto_key *key, struct request_queue *q) { - if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg)) + if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg)) return 0; return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode); } @@ -394,18 +396,17 @@ int blk_crypto_start_using_key(const struct blk_crypto_key *key, * evicted from any hardware that it might have been programmed into. The key * must not be in use by any in-flight IO when this function is called. * - * Return: 0 on success or if key is not present in the q's ksm, -err on error. + * Return: 0 on success or if the key wasn't in any keyslot; -errno on error. */ int blk_crypto_evict_key(struct request_queue *q, const struct blk_crypto_key *key) { - if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg)) - return blk_ksm_evict_key(q->ksm, key); + if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg)) + return __blk_crypto_evict_key(q->crypto_profile, key); /* - * If the request queue's associated inline encryption hardware didn't - * have support for the key, then the key might have been programmed - * into the fallback keyslot manager, so try to evict from there. + * If the request_queue didn't support the key, then blk-crypto-fallback + * may have been used, so try to evict the key from blk-crypto-fallback. */ return blk_crypto_fallback_evict_key(key); } diff --git a/block/blk-integrity.c b/block/blk-integrity.c index cef534a7cbc9..d670d54e5f7a 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -409,9 +409,9 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); #ifdef CONFIG_BLK_INLINE_ENCRYPTION - if (disk->queue->ksm) { + if (disk->queue->crypto_profile) { pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together. 
Disabling hardware inline encryption.\n"); - blk_ksm_unregister(disk->queue); + blk_crypto_unregister(disk->queue); } #endif } diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 841ed87999e7..b855fef4f38a 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -200,7 +200,7 @@ struct dm_table { struct dm_md_mempools *mempools; #ifdef CONFIG_BLK_INLINE_ENCRYPTION - struct blk_keyslot_manager *ksm; + struct blk_crypto_profile *crypto_profile; #endif }; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 1fa4d5582dca..8b0f27a745d9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -170,7 +170,7 @@ static void free_devices(struct list_head *devices, struct mapped_device *md) } } -static void dm_table_destroy_keyslot_manager(struct dm_table *t); +static void dm_table_destroy_crypto_profile(struct dm_table *t); void dm_table_destroy(struct dm_table *t) { @@ -200,7 +200,7 @@ void dm_table_destroy(struct dm_table *t) dm_free_md_mempools(t->mempools); - dm_table_destroy_keyslot_manager(t); + dm_table_destroy_crypto_profile(t); kfree(t); } @@ -1187,8 +1187,8 @@ static int dm_table_register_integrity(struct dm_table *t) #ifdef CONFIG_BLK_INLINE_ENCRYPTION -struct dm_keyslot_manager { - struct blk_keyslot_manager ksm; +struct dm_crypto_profile { + struct blk_crypto_profile profile; struct mapped_device *md; }; @@ -1214,13 +1214,11 @@ static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev, * When an inline encryption key is evicted from a device-mapper device, evict * it from all the underlying devices. */ -static int dm_keyslot_evict(struct blk_keyslot_manager *ksm, +static int dm_keyslot_evict(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot) { - struct dm_keyslot_manager *dksm = container_of(ksm, - struct dm_keyslot_manager, - ksm); - struct mapped_device *md = dksm->md; + struct mapped_device *md = + container_of(profile, struct dm_crypto_profile, profile)->md; struct dm_keyslot_evict_args args = { key }; struct dm_table *t; int srcu_idx; @@ -1240,150 +1238,148 @@ static int dm_keyslot_evict(struct blk_keyslot_manager *ksm, return args.err; } -static const struct blk_ksm_ll_ops dm_ksm_ll_ops = { - .keyslot_evict = dm_keyslot_evict, -}; - -static int device_intersect_crypto_modes(struct dm_target *ti, - struct dm_dev *dev, sector_t start, - sector_t len, void *data) +static int +device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { - struct blk_keyslot_manager *parent = data; - struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm; + struct blk_crypto_profile *parent = data; + struct blk_crypto_profile *child = + bdev_get_queue(dev->bdev)->crypto_profile; - blk_ksm_intersect_modes(parent, child); + blk_crypto_intersect_capabilities(parent, child); return 0; } -void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm) +void dm_destroy_crypto_profile(struct blk_crypto_profile *profile) { - struct dm_keyslot_manager *dksm = container_of(ksm, - struct dm_keyslot_manager, - ksm); + struct dm_crypto_profile *dmcp = container_of(profile, + struct dm_crypto_profile, + profile); - if (!ksm) + if (!profile) return; - blk_ksm_destroy(ksm); - kfree(dksm); + blk_crypto_profile_destroy(profile); + kfree(dmcp); } -static void dm_table_destroy_keyslot_manager(struct dm_table *t) +static void dm_table_destroy_crypto_profile(struct dm_table *t) { - dm_destroy_keyslot_manager(t->ksm); - t->ksm = NULL; + 
dm_destroy_crypto_profile(t->crypto_profile); + t->crypto_profile = NULL; } /* - * Constructs and initializes t->ksm with a keyslot manager that - * represents the common set of crypto capabilities of the devices - * described by the dm_table. However, if the constructed keyslot - * manager does not support a superset of the crypto capabilities - * supported by the current keyslot manager of the mapped_device, - * it returns an error instead, since we don't support restricting - * crypto capabilities on table changes. Finally, if the constructed - * keyslot manager doesn't actually support any crypto modes at all, - * it just returns NULL. + * Constructs and initializes t->crypto_profile with a crypto profile that + * represents the common set of crypto capabilities of the devices described by + * the dm_table. However, if the constructed crypto profile doesn't support all + * crypto capabilities that are supported by the current mapped_device, it + * returns an error instead, since we don't support removing crypto capabilities + * on table changes. Finally, if the constructed crypto profile is "empty" (has + * no crypto capabilities at all), it just sets t->crypto_profile to NULL. */ -static int dm_table_construct_keyslot_manager(struct dm_table *t) +static int dm_table_construct_crypto_profile(struct dm_table *t) { - struct dm_keyslot_manager *dksm; - struct blk_keyslot_manager *ksm; + struct dm_crypto_profile *dmcp; + struct blk_crypto_profile *profile; struct dm_target *ti; unsigned int i; - bool ksm_is_empty = true; + bool empty_profile = true; - dksm = kmalloc(sizeof(*dksm), GFP_KERNEL); - if (!dksm) + dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL); + if (!dmcp) return -ENOMEM; - dksm->md = t->md; + dmcp->md = t->md; - ksm = &dksm->ksm; - blk_ksm_init_passthrough(ksm); - ksm->ksm_ll_ops = dm_ksm_ll_ops; - ksm->max_dun_bytes_supported = UINT_MAX; - memset(ksm->crypto_modes_supported, 0xFF, - sizeof(ksm->crypto_modes_supported)); + profile = &dmcp->profile; + blk_crypto_profile_init(profile, 0); + profile->ll_ops.keyslot_evict = dm_keyslot_evict; + profile->max_dun_bytes_supported = UINT_MAX; + memset(profile->modes_supported, 0xFF, + sizeof(profile->modes_supported)); for (i = 0; i < dm_table_get_num_targets(t); i++) { ti = dm_table_get_target(t, i); if (!dm_target_passes_crypto(ti->type)) { - blk_ksm_intersect_modes(ksm, NULL); + blk_crypto_intersect_capabilities(profile, NULL); break; } if (!ti->type->iterate_devices) continue; - ti->type->iterate_devices(ti, device_intersect_crypto_modes, - ksm); + ti->type->iterate_devices(ti, + device_intersect_crypto_capabilities, + profile); } - if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) { + if (t->md->queue && + !blk_crypto_has_capabilities(profile, + t->md->queue->crypto_profile)) { DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); - dm_destroy_keyslot_manager(ksm); + dm_destroy_crypto_profile(profile); return -EINVAL; } /* - * If the new KSM doesn't actually support any crypto modes, we may as - * well represent it with a NULL ksm. + * If the new profile doesn't actually support any crypto capabilities, + * we may as well represent it with a NULL profile. 
*/ - ksm_is_empty = true; - for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) { - if (ksm->crypto_modes_supported[i]) { - ksm_is_empty = false; + for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) { + if (profile->modes_supported[i]) { + empty_profile = false; break; } } - if (ksm_is_empty) { - dm_destroy_keyslot_manager(ksm); - ksm = NULL; + if (empty_profile) { + dm_destroy_crypto_profile(profile); + profile = NULL; } /* - * t->ksm is only set temporarily while the table is being set - * up, and it gets set to NULL after the capabilities have - * been transferred to the request_queue. + * t->crypto_profile is only set temporarily while the table is being + * set up, and it gets set to NULL after the profile has been + * transferred to the request_queue. */ - t->ksm = ksm; + t->crypto_profile = profile; return 0; } -static void dm_update_keyslot_manager(struct request_queue *q, - struct dm_table *t) +static void dm_update_crypto_profile(struct request_queue *q, + struct dm_table *t) { - if (!t->ksm) + if (!t->crypto_profile) return; - /* Make the ksm less restrictive */ - if (!q->ksm) { - blk_ksm_register(t->ksm, q); + /* Make the crypto profile less restrictive. */ + if (!q->crypto_profile) { + blk_crypto_register(t->crypto_profile, q); } else { - blk_ksm_update_capabilities(q->ksm, t->ksm); - dm_destroy_keyslot_manager(t->ksm); + blk_crypto_update_capabilities(q->crypto_profile, + t->crypto_profile); + dm_destroy_crypto_profile(t->crypto_profile); } - t->ksm = NULL; + t->crypto_profile = NULL; } #else /* CONFIG_BLK_INLINE_ENCRYPTION */ -static int dm_table_construct_keyslot_manager(struct dm_table *t) +static int dm_table_construct_crypto_profile(struct dm_table *t) { return 0; } -void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm) +void dm_destroy_crypto_profile(struct blk_crypto_profile *profile) { } -static void dm_table_destroy_keyslot_manager(struct dm_table *t) +static void dm_table_destroy_crypto_profile(struct dm_table *t) { } -static void dm_update_keyslot_manager(struct request_queue *q, - struct dm_table *t) +static void dm_update_crypto_profile(struct request_queue *q, + struct dm_table *t) { } @@ -1415,9 +1411,9 @@ int dm_table_complete(struct dm_table *t) return r; } - r = dm_table_construct_keyslot_manager(t); + r = dm_table_construct_crypto_profile(t); if (r) { - DMERR("could not construct keyslot manager."); + DMERR("could not construct crypto profile."); return r; } @@ -2071,7 +2067,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, return r; } - dm_update_keyslot_manager(q, t); + dm_update_crypto_profile(q, t); disk_update_readahead(t->md->disk); return 0; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4184fd8ccb08..8b91f4f0e053 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1663,14 +1663,14 @@ static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); #ifdef CONFIG_BLK_INLINE_ENCRYPTION -static void dm_queue_destroy_keyslot_manager(struct request_queue *q) +static void dm_queue_destroy_crypto_profile(struct request_queue *q) { - dm_destroy_keyslot_manager(q->ksm); + dm_destroy_crypto_profile(q->crypto_profile); } #else /* CONFIG_BLK_INLINE_ENCRYPTION */ -static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q) +static inline void dm_queue_destroy_crypto_profile(struct request_queue *q) { } #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ @@ -1696,7 +1696,7 @@ static void cleanup_mapped_device(struct mapped_device *md) dm_sysfs_exit(md); 
del_gendisk(md->disk); } - dm_queue_destroy_keyslot_manager(md->queue); + dm_queue_destroy_crypto_profile(md->queue); blk_cleanup_disk(md->disk); } diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c index 67557808cada..fec4fbf16a5b 100644 --- a/drivers/mmc/core/crypto.c +++ b/drivers/mmc/core/crypto.c @@ -16,13 +16,13 @@ void mmc_crypto_set_initial_state(struct mmc_host *host) { /* Reset might clear all keys, so reprogram all the keys. */ if (host->caps2 & MMC_CAP2_CRYPTO) - blk_ksm_reprogram_all_keys(&host->ksm); + blk_crypto_reprogram_all_keys(&host->crypto_profile); } void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host) { if (host->caps2 & MMC_CAP2_CRYPTO) - blk_ksm_register(&host->ksm, q); + blk_crypto_register(&host->crypto_profile, q); } EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue); @@ -30,12 +30,15 @@ void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq) { struct request *req = mmc_queue_req_to_req(mqrq); struct mmc_request *mrq = &mqrq->brq.mrq; + struct blk_crypto_keyslot *keyslot; if (!req->crypt_ctx) return; mrq->crypto_ctx = req->crypt_ctx; - if (req->crypt_keyslot) - mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot); + + keyslot = req->crypt_keyslot; + if (keyslot) + mrq->crypto_key_slot = blk_crypto_keyslot_index(keyslot); } EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req); diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c index 628bbfaf8312..d5f4b6972f63 100644 --- a/drivers/mmc/host/cqhci-crypto.c +++ b/drivers/mmc/host/cqhci-crypto.c @@ -23,9 +23,10 @@ static const struct cqhci_crypto_alg_entry { }; static inline struct cqhci_host * -cqhci_host_from_ksm(struct blk_keyslot_manager *ksm) +cqhci_host_from_crypto_profile(struct blk_crypto_profile *profile) { - struct mmc_host *mmc = container_of(ksm, struct mmc_host, ksm); + struct mmc_host *mmc = + container_of(profile, struct mmc_host, crypto_profile); return mmc->cqe_private; } @@ -57,12 +58,12 @@ static int cqhci_crypto_program_key(struct cqhci_host *cq_host, return 0; } -static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm, +static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot) { - struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm); + struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile); const union cqhci_crypto_cap_entry *ccap_array = cq_host->crypto_cap_array; const struct cqhci_crypto_alg_entry *alg = @@ -115,11 +116,11 @@ static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot) return cqhci_crypto_program_key(cq_host, &cfg, slot); } -static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm, +static int cqhci_crypto_keyslot_evict(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot) { - struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm); + struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile); return cqhci_crypto_clear_keyslot(cq_host, slot); } @@ -132,7 +133,7 @@ static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm, * "enabled" when these are called, i.e. CQHCI_ENABLE might not be set in the * CQHCI_CFG register. But the hardware allows that. 
*/ -static const struct blk_ksm_ll_ops cqhci_ksm_ops = { +static const struct blk_crypto_ll_ops cqhci_crypto_ops = { .keyslot_program = cqhci_crypto_keyslot_program, .keyslot_evict = cqhci_crypto_keyslot_evict, }; @@ -157,8 +158,8 @@ cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap) * * If the driver previously set MMC_CAP2_CRYPTO and the CQE declares * CQHCI_CAP_CS, initialize the crypto support. This involves reading the - * crypto capability registers, initializing the keyslot manager, clearing all - * keyslots, and enabling 128-bit task descriptors. + * crypto capability registers, initializing the blk_crypto_profile, clearing + * all keyslots, and enabling 128-bit task descriptors. * * Return: 0 if crypto was initialized or isn't supported; whether * MMC_CAP2_CRYPTO remains set indicates which one of those cases it is. @@ -168,7 +169,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host) { struct mmc_host *mmc = cq_host->mmc; struct device *dev = mmc_dev(mmc); - struct blk_keyslot_manager *ksm = &mmc->ksm; + struct blk_crypto_profile *profile = &mmc->crypto_profile; unsigned int num_keyslots; unsigned int cap_idx; enum blk_crypto_mode_num blk_mode_num; @@ -199,15 +200,15 @@ int cqhci_crypto_init(struct cqhci_host *cq_host) */ num_keyslots = cq_host->crypto_capabilities.config_count + 1; - err = devm_blk_ksm_init(dev, ksm, num_keyslots); + err = devm_blk_crypto_profile_init(dev, profile, num_keyslots); if (err) goto out; - ksm->ksm_ll_ops = cqhci_ksm_ops; - ksm->dev = dev; + profile->ll_ops = cqhci_crypto_ops; + profile->dev = dev; /* Unfortunately, CQHCI crypto only supports 32 DUN bits. */ - ksm->max_dun_bytes_supported = 4; + profile->max_dun_bytes_supported = 4; /* * Cache all the crypto capabilities and advertise the supported crypto @@ -223,7 +224,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host) cq_host->crypto_cap_array[cap_idx]); if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID) continue; - ksm->crypto_modes_supported[blk_mode_num] |= + profile->modes_supported[blk_mode_num] |= cq_host->crypto_cap_array[cap_idx].sdus_mask * 512; } diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c index d70cdcd35e43..67402baf6fae 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.c +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -48,11 +48,12 @@ out: return err; } -static int ufshcd_crypto_keyslot_program(struct blk_keyslot_manager *ksm, +static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot) { - struct ufs_hba *hba = container_of(ksm, struct ufs_hba, ksm); + struct ufs_hba *hba = + container_of(profile, struct ufs_hba, crypto_profile); const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array; const struct ufs_crypto_alg_entry *alg = &ufs_crypto_algs[key->crypto_cfg.crypto_mode]; @@ -105,11 +106,12 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) return ufshcd_program_key(hba, &cfg, slot); } -static int ufshcd_crypto_keyslot_evict(struct blk_keyslot_manager *ksm, +static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot) { - struct ufs_hba *hba = container_of(ksm, struct ufs_hba, ksm); + struct ufs_hba *hba = + container_of(profile, struct ufs_hba, crypto_profile); return ufshcd_clear_keyslot(hba, slot); } @@ -120,11 +122,11 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba) return false; /* Reset might clear all keys, so reprogram all the keys. 
*/ - blk_ksm_reprogram_all_keys(&hba->ksm); + blk_crypto_reprogram_all_keys(&hba->crypto_profile); return true; } -static const struct blk_ksm_ll_ops ufshcd_ksm_ops = { +static const struct blk_crypto_ll_ops ufshcd_crypto_ops = { .keyslot_program = ufshcd_crypto_keyslot_program, .keyslot_evict = ufshcd_crypto_keyslot_evict, }; @@ -179,15 +181,16 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) } /* The actual number of configurations supported is (CFGC+1) */ - err = devm_blk_ksm_init(hba->dev, &hba->ksm, - hba->crypto_capabilities.config_count + 1); + err = devm_blk_crypto_profile_init( + hba->dev, &hba->crypto_profile, + hba->crypto_capabilities.config_count + 1); if (err) goto out; - hba->ksm.ksm_ll_ops = ufshcd_ksm_ops; + hba->crypto_profile.ll_ops = ufshcd_crypto_ops; /* UFS only supports 8 bytes for any DUN */ - hba->ksm.max_dun_bytes_supported = 8; - hba->ksm.dev = hba->dev; + hba->crypto_profile.max_dun_bytes_supported = 8; + hba->crypto_profile.dev = hba->dev; /* * Cache all the UFS crypto capabilities and advertise the supported @@ -202,7 +205,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) blk_mode_num = ufshcd_find_blk_crypto_mode( hba->crypto_cap_array[cap_idx]); if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID) - hba->ksm.crypto_modes_supported[blk_mode_num] |= + hba->crypto_profile.modes_supported[blk_mode_num] |= hba->crypto_cap_array[cap_idx].sdus_mask * 512; } @@ -230,9 +233,8 @@ void ufshcd_init_crypto(struct ufs_hba *hba) ufshcd_clear_keyslot(hba, slot); } -void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q) +void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q) { if (hba->caps & UFSHCD_CAP_CRYPTO) - blk_ksm_register(&hba->ksm, q); + blk_crypto_register(&hba->crypto_profile, q); } diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h index 78a58e788dff..e18c01276873 100644 --- a/drivers/scsi/ufs/ufshcd-crypto.h +++ b/drivers/scsi/ufs/ufshcd-crypto.h @@ -18,7 +18,7 @@ static inline void ufshcd_prepare_lrbp_crypto(struct request *rq, return; } - lrbp->crypto_key_slot = blk_ksm_get_slot_idx(rq->crypt_keyslot); + lrbp->crypto_key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot); lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0]; } @@ -40,8 +40,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba); void ufshcd_init_crypto(struct ufs_hba *hba); -void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q); +void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q); #else /* CONFIG_SCSI_UFS_CRYPTO */ @@ -64,8 +63,8 @@ static inline int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) static inline void ufshcd_init_crypto(struct ufs_hba *hba) { } -static inline void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, - struct request_queue *q) { } +static inline void ufshcd_crypto_register(struct ufs_hba *hba, + struct request_queue *q) { } #endif /* CONFIG_SCSI_UFS_CRYPTO */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 95be7ecdfe10..bf81da2ecf98 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -4986,7 +4986,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) else if (ufshcd_is_rpm_autosuspend_allowed(hba)) sdev->rpm_autosuspend = 1; - ufshcd_crypto_setup_rq_keyslot_manager(hba, q); + ufshcd_crypto_register(hba, q); return 0; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 
885fcf2e5922..62bdc412d38a 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -766,7 +766,7 @@ struct ufs_hba_monitor { * @crypto_capabilities: Content of crypto capabilities register (0x100) * @crypto_cap_array: Array of crypto capabilities * @crypto_cfg_register: Start of the crypto cfg array - * @ksm: the keyslot manager tied to this hba + * @crypto_profile: the crypto profile of this hba (if applicable) */ struct ufs_hba { void __iomem *mmio_base; @@ -911,7 +911,7 @@ struct ufs_hba { union ufs_crypto_capabilities crypto_capabilities; union ufs_crypto_cap_entry *crypto_cap_array; u32 crypto_cfg_register; - struct blk_keyslot_manager ksm; + struct blk_crypto_profile crypto_profile; #endif #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_root; diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h index a27605e2f826..bbab65bd5428 100644 --- a/include/linux/blk-crypto-profile.h +++ b/include/linux/blk-crypto-profile.h @@ -3,67 +3,113 @@ * Copyright 2019 Google LLC */ -#ifndef __LINUX_KEYSLOT_MANAGER_H -#define __LINUX_KEYSLOT_MANAGER_H +#ifndef __LINUX_BLK_CRYPTO_PROFILE_H +#define __LINUX_BLK_CRYPTO_PROFILE_H #include #include -struct blk_keyslot_manager; +struct blk_crypto_profile; /** - * struct blk_ksm_ll_ops - functions to manage keyslots in hardware - * @keyslot_program: Program the specified key into the specified slot in the - * inline encryption hardware. - * @keyslot_evict: Evict key from the specified keyslot in the hardware. - * The key is provided so that e.g. dm layers can evict - * keys from the devices that they map over. - * Returns 0 on success, -errno otherwise. + * struct blk_crypto_ll_ops - functions to control inline encryption hardware * - * This structure should be provided by storage device drivers when they set up - * a keyslot manager - this structure holds the function ptrs that the keyslot - * manager will use to manipulate keyslots in the hardware. + * Low-level operations for controlling inline encryption hardware. This + * interface must be implemented by storage drivers that support inline + * encryption. All functions may sleep, are serialized by profile->lock, and + * are never called while profile->dev (if set) is runtime-suspended. */ -struct blk_ksm_ll_ops { - int (*keyslot_program)(struct blk_keyslot_manager *ksm, +struct blk_crypto_ll_ops { + + /** + * @keyslot_program: Program a key into the inline encryption hardware. + * + * Program @key into the specified @slot in the inline encryption + * hardware, overwriting any key that the keyslot may already contain. + * The keyslot is guaranteed to not be in-use by any I/O. + * + * This is required if the device has keyslots. Otherwise (i.e. if the + * device is a layered device, or if the device is real hardware that + * simply doesn't have the concept of keyslots) it is never called. + * + * Must return 0 on success, or -errno on failure. + */ + int (*keyslot_program)(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot); - int (*keyslot_evict)(struct blk_keyslot_manager *ksm, + + /** + * @keyslot_evict: Evict a key from the inline encryption hardware. + * + * If the device has keyslots, this function must evict the key from the + * specified @slot. The slot will contain @key, but there should be no + * need for the @key argument to be used as @slot should be sufficient. + * The keyslot is guaranteed to not be in-use by any I/O. 
+ * + * If the device doesn't have keyslots itself, this function must evict + * @key from any underlying devices. @slot won't be valid in this case. + * + * If there are no keyslots and no underlying devices, this function + * isn't required. + * + * Must return 0 on success, or -errno on failure. + */ + int (*keyslot_evict)(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, unsigned int slot); }; -struct blk_keyslot_manager { - /* - * The struct blk_ksm_ll_ops that this keyslot manager will use - * to perform operations like programming and evicting keys on the - * device - */ - struct blk_ksm_ll_ops ksm_ll_ops; +/** + * struct blk_crypto_profile - inline encryption profile for a device + * + * This struct contains a storage device's inline encryption capabilities (e.g. + * the supported crypto algorithms), driver-provided functions to control the + * inline encryption hardware (e.g. programming and evicting keys), and optional + * device-independent keyslot management data. + */ +struct blk_crypto_profile { - /* - * The maximum number of bytes supported for specifying the data unit - * number. + /* public: Drivers must initialize the following fields. */ + + /** + * @ll_ops: Driver-provided functions to control the inline encryption + * hardware, e.g. program and evict keys. + */ + struct blk_crypto_ll_ops ll_ops; + + /** + * @max_dun_bytes_supported: The maximum number of bytes supported for + * specifying the data unit number (DUN). Specifically, the range of + * supported DUNs is 0 through (1 << (8 * max_dun_bytes_supported)) - 1. */ unsigned int max_dun_bytes_supported; - /* - * Array of size BLK_ENCRYPTION_MODE_MAX of bitmasks that represents - * whether a crypto mode and data unit size are supported. The i'th - * bit of crypto_mode_supported[crypto_mode] is set iff a data unit - * size of (1 << i) is supported. We only support data unit sizes - * that are powers of 2. + /** + * @modes_supported: Array of bitmasks that specifies whether each + * combination of crypto mode and data unit size is supported. + * Specifically, the i'th bit of modes_supported[crypto_mode] is set if + * crypto_mode can be used with a data unit size of (1 << i). Note that + * only data unit sizes that are powers of 2 can be supported. */ - unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; + unsigned int modes_supported[BLK_ENCRYPTION_MODE_MAX]; - /* Device for runtime power management (NULL if none) */ + /** + * @dev: An optional device for runtime power management. If the driver + * provides this device, it will be runtime-resumed before any function + * in @ll_ops is called and will remain resumed during the call. + */ struct device *dev; - /* Here onwards are *private* fields for internal keyslot manager use */ + /* private: The following fields shouldn't be accessed by drivers. */ + /* Number of keyslots, or 0 if not applicable */ unsigned int num_slots; - /* Protects programming and evicting keys from the device */ + /* + * Serializes all calls to functions in @ll_ops as well as all changes + * to @slot_hashtable. This can also be taken in read mode to look up + * keyslots while ensuring that they can't be changed concurrently. 
+ */ struct rw_semaphore lock; /* List of idle slots, with least recently used slot at front */ @@ -80,41 +126,41 @@ struct blk_keyslot_manager { unsigned int log_slot_ht_size; /* Per-keyslot data */ - struct blk_ksm_keyslot *slots; + struct blk_crypto_keyslot *slots; }; -int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots); +int blk_crypto_profile_init(struct blk_crypto_profile *profile, + unsigned int num_slots); -int devm_blk_ksm_init(struct device *dev, struct blk_keyslot_manager *ksm, - unsigned int num_slots); +int devm_blk_crypto_profile_init(struct device *dev, + struct blk_crypto_profile *profile, + unsigned int num_slots); -blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key, - struct blk_ksm_keyslot **slot_ptr); +unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot); -unsigned int blk_ksm_get_slot_idx(struct blk_ksm_keyslot *slot); +blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + struct blk_crypto_keyslot **slot_ptr); -void blk_ksm_put_slot(struct blk_ksm_keyslot *slot); +void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot); -bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm, - const struct blk_crypto_config *cfg); +bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile, + const struct blk_crypto_config *cfg); -int blk_ksm_evict_key(struct blk_keyslot_manager *ksm, - const struct blk_crypto_key *key); +int __blk_crypto_evict_key(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key); -void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm); +void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile); -void blk_ksm_destroy(struct blk_keyslot_manager *ksm); +void blk_crypto_profile_destroy(struct blk_crypto_profile *profile); -void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent, - const struct blk_keyslot_manager *child); +void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent, + const struct blk_crypto_profile *child); -void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm); +bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target, + const struct blk_crypto_profile *reference); -bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset, - struct blk_keyslot_manager *ksm_subset); +void blk_crypto_update_capabilities(struct blk_crypto_profile *dst, + const struct blk_crypto_profile *src); -void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm, - struct blk_keyslot_manager *reference_ksm); - -#endif /* __LINUX_KEYSLOT_MANAGER_H */ +#endif /* __LINUX_BLK_CRYPTO_PROFILE_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index e13780236550..b4039fdf1b04 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -133,7 +133,7 @@ struct request { #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct bio_crypt_ctx *crypt_ctx; - struct blk_ksm_keyslot *crypt_keyslot; + struct blk_crypto_keyslot *crypt_keyslot; #endif unsigned short write_hint; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c7b1e9355123..f72ccb2829db 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -30,7 +30,7 @@ struct pr_ops; struct rq_qos; struct blk_queue_stats; struct blk_stat_callback; -struct blk_keyslot_manager; +struct blk_crypto_profile; /* Must be consistent with blk_mq_poll_stats_bkt() */ #define BLK_MQ_POLL_STATS_BKTS 16 @@ -224,8 +224,7 @@ struct 
request_queue { unsigned int dma_alignment; #ifdef CONFIG_BLK_INLINE_ENCRYPTION - /* Inline crypto capabilities */ - struct blk_keyslot_manager *ksm; + struct blk_crypto_profile *crypto_profile; #endif unsigned int rq_timeout; @@ -1142,19 +1141,20 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned lo #ifdef CONFIG_BLK_INLINE_ENCRYPTION -bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q); +bool blk_crypto_register(struct blk_crypto_profile *profile, + struct request_queue *q); -void blk_ksm_unregister(struct request_queue *q); +void blk_crypto_unregister(struct request_queue *q); #else /* CONFIG_BLK_INLINE_ENCRYPTION */ -static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm, - struct request_queue *q) +static inline bool blk_crypto_register(struct blk_crypto_profile *profile, + struct request_queue *q) { return true; } -static inline void blk_ksm_unregister(struct request_queue *q) { } +static inline void blk_crypto_unregister(struct request_queue *q) { } #endif /* CONFIG_BLK_INLINE_ENCRYPTION */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 114553b487ef..a7df155ea49b 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -576,9 +576,9 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *t); /* - * Table keyslot manager functions + * Table blk_crypto_profile functions */ -void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); +void dm_destroy_crypto_profile(struct blk_crypto_profile *profile); /*----------------------------------------------------------------- * Macros. diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 725b1de41767..52eae8c45b8d 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -492,7 +492,7 @@ struct mmc_host { /* Inline encryption support */ #ifdef CONFIG_MMC_CRYPTO - struct blk_keyslot_manager ksm; + struct blk_crypto_profile crypto_profile; #endif /* Host Software Queue support */ From 8e9f666a6e66d3f882c094646d35536d2759103a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 18 Oct 2021 11:04:53 -0700 Subject: [PATCH 151/174] blk-crypto: update inline encryption documentation Rework most of inline-encryption.rst to be easier to follow, to correct some information, to add some important details and remove some unimportant details, and to take into account the renaming from blk_keyslot_manager to blk_crypto_profile. Reviewed-by: Mike Snitzer Reviewed-by: Martin K. Petersen Signed-off-by: Eric Biggers Link: https://lore.kernel.org/r/20211018180453.40441-5-ebiggers@kernel.org Signed-off-by: Jens Axboe --- Documentation/block/inline-encryption.rst | 401 ++++++++++++---------- 1 file changed, 220 insertions(+), 181 deletions(-) diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst index 7f9b40d6b416..71d1044617a9 100644 --- a/Documentation/block/inline-encryption.rst +++ b/Documentation/block/inline-encryption.rst @@ -7,230 +7,269 @@ Inline Encryption Background ========== -Inline encryption hardware sits logically between memory and the disk, and can -en/decrypt data as it goes in/out of the disk. Inline encryption hardware has a -fixed number of "keyslots" - slots into which encryption contexts (i.e. the -encryption key, encryption algorithm, data unit size) can be programmed by the -kernel at any time. 
Each request sent to the disk can be tagged with the index -of a keyslot (and also a data unit number to act as an encryption tweak), and -the inline encryption hardware will en/decrypt the data in the request with the -encryption context programmed into that keyslot. This is very different from -full disk encryption solutions like self encrypting drives/TCG OPAL/ATA -Security standards, since with inline encryption, any block on disk could be -encrypted with any encryption context the kernel chooses. +Inline encryption hardware sits logically between memory and disk, and can +en/decrypt data as it goes in/out of the disk. For each I/O request, software +can control exactly how the inline encryption hardware will en/decrypt the data +in terms of key, algorithm, data unit size (the granularity of en/decryption), +and data unit number (a value that determines the initialization vector(s)). +Some inline encryption hardware accepts all encryption parameters including raw +keys directly in low-level I/O requests. However, most inline encryption +hardware instead has a fixed number of "keyslots" and requires that the key, +algorithm, and data unit size first be programmed into a keyslot. Each +low-level I/O request then just contains a keyslot index and data unit number. + +Note that inline encryption hardware is very different from traditional crypto +accelerators, which are supported through the kernel crypto API. Traditional +crypto accelerators operate on memory regions, whereas inline encryption +hardware operates on I/O requests. Thus, inline encryption hardware needs to be +managed by the block layer, not the kernel crypto API. + +Inline encryption hardware is also very different from "self-encrypting drives", +such as those based on the TCG Opal or ATA Security standards. Self-encrypting +drives don't provide fine-grained control of encryption and provide no way to +verify the correctness of the resulting ciphertext. Inline encryption hardware +provides fine-grained control of encryption, including the choice of key and +initialization vector for each sector, and can be tested for correctness. Objective ========= -We want to support inline encryption (IE) in the kernel. -To allow for testing, we also want a crypto API fallback when actual -IE hardware is absent. We also want IE to work with layered devices -like dm and loopback (i.e. we want to be able to use the IE hardware -of the underlying devices if present, or else fall back to crypto API -en/decryption). - +We want to support inline encryption in the kernel. To make testing easier, we +also want support for falling back to the kernel crypto API when actual inline +encryption hardware is absent. We also want inline encryption to work with +layered devices like device-mapper and loopback (i.e. we want to be able to use +the inline encryption hardware of the underlying devices if present, or else +fall back to crypto API en/decryption). Constraints and notes ===================== -- IE hardware has a limited number of "keyslots" that can be programmed - with an encryption context (key, algorithm, data unit size, etc.) at any time. - One can specify a keyslot in a data request made to the device, and the - device will en/decrypt the data using the encryption context programmed into - that specified keyslot. When possible, we want to make multiple requests with - the same encryption context share the same keyslot. +- We need a way for upper layers (e.g. 
filesystems) to specify an encryption + context to use for en/decrypting a bio, and device drivers (e.g. UFSHCD) need + to be able to use that encryption context when they process the request. + Encryption contexts also introduce constraints on bio merging; the block layer + needs to be aware of these constraints. -- We need a way for upper layers like filesystems to specify an encryption - context to use for en/decrypting a struct bio, and a device driver (like UFS) - needs to be able to use that encryption context when it processes the bio. +- Different inline encryption hardware has different supported algorithms, + supported data unit sizes, maximum data unit numbers, etc. We call these + properties the "crypto capabilities". We need a way for device drivers to + advertise crypto capabilities to upper layers in a generic way. -- We need a way for device drivers to expose their inline encryption - capabilities in a unified way to the upper layers. +- Inline encryption hardware usually (but not always) requires that keys be + programmed into keyslots before being used. Since programming keyslots may be + slow and there may not be very many keyslots, we shouldn't just program the + key for every I/O request, but rather keep track of which keys are in the + keyslots and reuse an already-programmed keyslot when possible. +- Upper layers typically define a specific end-of-life for crypto keys, e.g. + when an encrypted directory is locked or when a crypto mapping is torn down. + At these times, keys are wiped from memory. We must provide a way for upper + layers to also evict keys from any keyslots they are present in. -Design -====== +- When possible, device-mapper devices must be able to pass through the inline + encryption support of their underlying devices. However, it doesn't make + sense for device-mapper devices to have keyslots themselves. -We add a struct bio_crypt_ctx to struct bio that can -represent an encryption context, because we need to be able to pass this -encryption context from the upper layers (like the fs layer) to the -device driver to act upon. +Basic design +============ -While IE hardware works on the notion of keyslots, the FS layer has no -knowledge of keyslots - it simply wants to specify an encryption context to -use while en/decrypting a bio. +We introduce ``struct blk_crypto_key`` to represent an inline encryption key and +how it will be used. This includes the actual bytes of the key; the size of the +key; the algorithm and data unit size the key will be used with; and the number +of bytes needed to represent the maximum data unit number the key will be used +with. -We introduce a keyslot manager (KSM) that handles the translation from -encryption contexts specified by the FS to keyslots on the IE hardware. -This KSM also serves as the way IE hardware can expose its capabilities to -upper layers. The generic mode of operation is: each device driver that wants -to support IE will construct a KSM and set it up in its struct request_queue. -Upper layers that want to use IE on this device can then use this KSM in -the device's struct request_queue to translate an encryption context into -a keyslot. The presence of the KSM in the request queue shall be used to mean -that the device supports IE. +We introduce ``struct bio_crypt_ctx`` to represent an encryption context. It +contains a data unit number and a pointer to a blk_crypto_key. We add pointers +to a bio_crypt_ctx to ``struct bio`` and ``struct request``; this allows users +of the block layer (e.g. 
filesystems) to provide an encryption context when +creating a bio and have it be passed down the stack for processing by the block +layer and device drivers. Note that the encryption context doesn't explicitly +say whether to encrypt or decrypt, as that is implicit from the direction of the +bio; WRITE means encrypt, and READ means decrypt. -The KSM uses refcounts to track which keyslots are idle (either they have no -encryption context programmed, or there are no in-flight struct bios -referencing that keyslot). When a new encryption context needs a keyslot, it -tries to find a keyslot that has already been programmed with the same -encryption context, and if there is no such keyslot, it evicts the least -recently used idle keyslot and programs the new encryption context into that -one. If no idle keyslots are available, then the caller will sleep until there -is at least one. +We also introduce ``struct blk_crypto_profile`` to contain all generic inline +encryption-related state for a particular inline encryption device. The +blk_crypto_profile serves as the way that drivers for inline encryption hardware +advertise their crypto capabilities and provide certain functions (e.g., +functions to program and evict keys) to upper layers. Each device driver that +wants to support inline encryption will construct a blk_crypto_profile, then +associate it with the disk's request_queue. +The blk_crypto_profile also manages the hardware's keyslots, when applicable. +This happens in the block layer, so that users of the block layer can just +specify encryption contexts and don't need to know about keyslots at all, nor do +device drivers need to care about most details of keyslot management. -blk-mq changes, other block layer changes and blk-crypto-fallback -================================================================= +Specifically, for each keyslot, the block layer (via the blk_crypto_profile) +keeps track of which blk_crypto_key that keyslot contains (if any), and how many +in-flight I/O requests are using it. When the block layer creates a +``struct request`` for a bio that has an encryption context, it grabs a keyslot +that already contains the key if possible. Otherwise it waits for an idle +keyslot (a keyslot that isn't in-use by any I/O), then programs the key into the +least-recently-used idle keyslot using the function the device driver provided. +In both cases, the resulting keyslot is stored in the ``crypt_keyslot`` field of +the request, where it is then accessible to device drivers and is released after +the request completes. -We add a pointer to a ``bi_crypt_context`` and ``keyslot`` to -struct request. These will be referred to as the ``crypto fields`` -for the request. This ``keyslot`` is the keyslot into which the -``bi_crypt_context`` has been programmed in the KSM of the ``request_queue`` -that this request is being sent to. +``struct request`` also contains a pointer to the original bio_crypt_ctx. +Requests can be built from multiple bios, and the block layer must take the +encryption context into account when trying to merge bios and requests. For two +bios/requests to be merged, they must have compatible encryption contexts: both +unencrypted, or both encrypted with the same key and contiguous data unit +numbers. Only the encryption context for the first bio in a request is +retained, since the remaining bios have been verified to be merge-compatible +with the first bio. 
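As a concrete illustration of the merge rule just described, a compatibility check could be sketched as follows. This is a hypothetical helper, not the kernel's actual implementation (the real checks live in the blk-crypto code and also handle multi-word DUNs); it assumes the ``bc_key`` and ``bc_dun`` fields of ``struct bio_crypt_ctx``, and ``c1_data_units`` stands for the length of the first bio in data units:

static bool example_bio_crypt_mergeable(const struct bio_crypt_ctx *c1,
					const struct bio_crypt_ctx *c2,
					u64 c1_data_units)
{
	if (!c1 && !c2)
		return true;	/* both unencrypted: always mergeable */
	if (!c1 || !c2)
		return false;	/* encrypted and unencrypted never mix */
	if (c1->bc_key != c2->bc_key)
		return false;	/* must reference the same blk_crypto_key */
	/* DUNs must be contiguous: c2 must start where c1 ends. */
	return c2->bc_dun[0] == c1->bc_dun[0] + c1_data_units;
}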
-We introduce ``block/blk-crypto-fallback.c``, which allows upper layers to remain -blissfully unaware of whether or not real inline encryption hardware is present -underneath. When a bio is submitted with a target ``request_queue`` that doesn't -support the encryption context specified with the bio, the block layer will -en/decrypt the bio with the blk-crypto-fallback. -If the bio is a ``WRITE`` bio, a bounce bio is allocated, and the data in the bio -is encrypted stored in the bounce bio - blk-mq will then proceed to process the -bounce bio as if it were not encrypted at all (except when blk-integrity is -concerned). ``blk-crypto-fallback`` sets the bounce bio's ``bi_end_io`` to an -internal function that cleans up the bounce bio and ends the original bio. +blk-crypto-fallback +=================== -If the bio is a ``READ`` bio, the bio's ``bi_end_io`` (and also ``bi_private``) -is saved and overwritten by ``blk-crypto-fallback`` to -``bio_crypto_fallback_decrypt_bio``. The bio's ``bi_crypt_context`` is also -overwritten with ``NULL``, so that to the rest of the stack, the bio looks -as if it was a regular bio that never had an encryption context specified. -``bio_crypto_fallback_decrypt_bio`` will decrypt the bio, restore the original -``bi_end_io`` (and also ``bi_private``) and end the bio again. +It is desirable for the inline encryption support of upper layers (e.g. +filesystems) to be testable without real inline encryption hardware, and +likewise for the block layer's keyslot management logic. It is also desirable +to allow upper layers to just always use inline encryption rather than have to +implement encryption in multiple ways. -Regardless of whether real inline encryption hardware is used or the +Therefore, we also introduce *blk-crypto-fallback*, which is an implementation +of inline encryption using the kernel crypto API. blk-crypto-fallback is built +into the block layer, so it works on any block device without any special setup. +Essentially, when a bio with an encryption context is submitted to a +request_queue that doesn't support that encryption context, the block layer will +handle en/decryption of the bio using blk-crypto-fallback. + +For encryption, the data cannot be encrypted in-place, as callers usually rely +on it being unmodified. Instead, blk-crypto-fallback allocates bounce pages, +fills a new bio with those bounce pages, encrypts the data into those bounce +pages, and submits that "bounce" bio. When the bounce bio completes, +blk-crypto-fallback completes the original bio. If the original bio is too +large, multiple bounce bios may be required; see the code for details. + +For decryption, blk-crypto-fallback "wraps" the bio's completion callback +(``bi_end_io``) and private data (``bi_private``) with its own, unsets the +bio's encryption context, then submits the bio. If the read completes +successfully, blk-crypto-fallback restores the bio's original completion +callback and private data, then decrypts the bio's data in-place using the +kernel crypto API. Decryption happens from a workqueue, as it may sleep. +Afterwards, blk-crypto-fallback completes the bio. + +In both cases, the bios that blk-crypto-fallback submits no longer have an +encryption context.
Therefore, lower layers only see standard unencrypted I/O. + +blk-crypto-fallback also defines its own blk_crypto_profile and has its own +"keyslots"; its keyslots contain ``struct crypto_skcipher`` objects. The reason +for this is twofold. First, it allows the keyslot management logic to be tested +without actual inline encryption hardware. Second, similar to actual inline +encryption hardware, the crypto API doesn't accept keys directly in requests but +rather requires that keys be set ahead of time, and setting keys can be +expensive; moreover, allocating a crypto_skcipher can't happen on the I/O path +at all due to the locks it takes. Therefore, the concept of keyslots still +makes sense for blk-crypto-fallback. + +Note that regardless of whether real inline encryption hardware or blk-crypto-fallback is used, the ciphertext written to disk (and hence the -on-disk format of data) will be the same (assuming the hardware's implementation -of the algorithm being used adheres to spec and functions correctly). - -If a ``request queue``'s inline encryption hardware claimed to support the -encryption context specified with a bio, then it will not be handled by the -``blk-crypto-fallback``. We will eventually reach a point in blk-mq when a -struct request needs to be allocated for that bio. At that point, -blk-mq tries to program the encryption context into the ``request_queue``'s -keyslot_manager, and obtain a keyslot, which it stores in its newly added -``keyslot`` field. This keyslot is released when the request is completed. - -When the first bio is added to a request, ``blk_crypto_rq_bio_prep`` is called, -which sets the request's ``crypt_ctx`` to a copy of the bio's -``bi_crypt_context``. bio_crypt_do_front_merge is called whenever a subsequent -bio is merged to the front of the request, which updates the ``crypt_ctx`` of -the request so that it matches the newly merged bio's ``bi_crypt_context``. In particular, the request keeps a copy of the ``bi_crypt_context`` of the first -bio in its bio-list (blk-mq needs to be careful to maintain this invariant -during bio and request merges). - -To make it possible for inline encryption to work with request queue based -layered devices, when a request is cloned, its ``crypto fields`` are cloned as -well. When the cloned request is submitted, blk-mq programs the -``bi_crypt_context`` of the request into the clone's request_queue's keyslot -manager, and stores the returned keyslot in the clone's ``keyslot``. +on-disk format of data) will be the same (assuming that both the inline +encryption hardware's implementation and the kernel crypto API's implementation +of the algorithm being used adhere to spec and function correctly). +blk-crypto-fallback is optional and is controlled by the +``CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK`` kernel configuration option. API presented to users of the block layer ========================================= -``struct blk_crypto_key`` represents a crypto key (the raw key, size of the -key, the crypto algorithm to use, the data unit size to use, and the number of -bytes required to represent data unit numbers that will be specified with the -``bi_crypt_context``). +``blk_crypto_config_supported()`` allows users to check ahead of time whether +inline encryption with particular crypto settings will work on a particular +request_queue -- either via hardware or via blk-crypto-fallback. 
This function +takes in a ``struct blk_crypto_config`` which is like blk_crypto_key, but omits +the actual bytes of the key and instead just contains the algorithm, data unit +size, etc. This function can be useful if blk-crypto-fallback is disabled. -``blk_crypto_init_key`` allows upper layers to initialize such a -``blk_crypto_key``. +``blk_crypto_init_key()`` allows users to initialize a blk_crypto_key. -``bio_crypt_set_ctx`` should be called on any bio that a user of -the block layer wants en/decrypted via inline encryption (or the -blk-crypto-fallback, if hardware support isn't available for the desired -crypto configuration). This function takes the ``blk_crypto_key`` and the -data unit number (DUN) to use when en/decrypting the bio. +Users must call ``blk_crypto_start_using_key()`` before actually starting to use +a blk_crypto_key on a request_queue (even if ``blk_crypto_config_supported()`` +was called earlier). This is needed to initialize blk-crypto-fallback if it +will be needed. This must not be called from the data path, as this may have to +allocate resources, which may deadlock in that case. -``blk_crypto_config_supported`` allows upper layers to query whether or not the -an encryption context passed to request queue can be handled by blk-crypto -(either by real inline encryption hardware, or by the blk-crypto-fallback). -This is useful e.g. when blk-crypto-fallback is disabled, and the upper layer -wants to use an algorithm that may not supported by hardware - this function -lets the upper layer know ahead of time that the algorithm isn't supported, -and the upper layer can fallback to something else if appropriate. +Next, to attach an encryption context to a bio, users should call +``bio_crypt_set_ctx()``. This function allocates a bio_crypt_ctx and attaches +it to a bio, given the blk_crypto_key and the data unit number that will be used +for en/decryption. Users don't need to worry about freeing the bio_crypt_ctx +later, as that happens automatically when the bio is freed or reset. -``blk_crypto_start_using_key`` - Upper layers must call this function on -``blk_crypto_key`` and a ``request_queue`` before using the key with any bio -headed for that ``request_queue``. This function ensures that either the -hardware supports the key's crypto settings, or the crypto API fallback has -transforms for the needed mode allocated and ready to go. Note that this -function may allocate an ``skcipher``, and must not be called from the data -path, since allocating ``skciphers`` from the data path can deadlock. +Finally, when done using inline encryption with a blk_crypto_key on a +request_queue, users must call ``blk_crypto_evict_key()``. This ensures that +the key is evicted from all keyslots it may be programmed into and unlinked from +any kernel data structures it may be linked into. -``blk_crypto_evict_key`` *must* be called by upper layers before a -``blk_crypto_key`` is freed. Further, it *must* only be called only once -there are no more in-flight requests that use that ``blk_crypto_key``. -``blk_crypto_evict_key`` will ensure that a key is removed from any keyslots in -inline encryption hardware that the key might have been programmed into (or the blk-crypto-fallback). +In summary, for users of the block layer, the lifecycle of a blk_crypto_key is +as follows: + +1. ``blk_crypto_config_supported()`` (optional) +2. ``blk_crypto_init_key()`` +3. ``blk_crypto_start_using_key()`` +4. ``bio_crypt_set_ctx()`` (potentially many times) +5. 
``blk_crypto_evict_key()`` (after all I/O has completed) +6. Zeroize the blk_crypto_key (this has no dedicated function) + +If a blk_crypto_key is being used on multiple request_queues, then +``blk_crypto_config_supported()`` (if used), ``blk_crypto_start_using_key()``, +and ``blk_crypto_evict_key()`` must be called on each request_queue. API presented to device drivers =============================== -A :c:type:``struct blk_keyslot_manager`` should be set up by device drivers in -the ``request_queue`` of the device. The device driver needs to call -``blk_ksm_init`` (or its resource-managed variant ``devm_blk_ksm_init``) on the -``blk_keyslot_manager``, while specifying the number of keyslots supported by -the hardware. +A device driver that wants to support inline encryption must set up a +blk_crypto_profile in the request_queue of its device. To do this, it first +must call ``blk_crypto_profile_init()`` (or its resource-managed variant +``devm_blk_crypto_profile_init()``), providing the number of keyslots. -The device driver also needs to tell the KSM how to actually manipulate the -IE hardware in the device to do things like programming the crypto key into -the IE hardware into a particular keyslot. All this is achieved through the -struct blk_ksm_ll_ops field in the KSM that the device driver -must fill up after initing the ``blk_keyslot_manager``. +Next, it must advertise its crypto capabilities by setting fields in the +blk_crypto_profile, e.g. ``modes_supported`` and ``max_dun_bytes_supported``. -The KSM also handles runtime power management for the device when applicable -(e.g. when it wants to program a crypto key into the IE hardware, the device -must be runtime powered on) - so the device driver must also set the ``dev`` -field in the ksm to point to the `struct device` for the KSM to use for runtime -power management. +It then must set function pointers in the ``ll_ops`` field of the +blk_crypto_profile to tell upper layers how to control the inline encryption +hardware, e.g. how to program and evict keyslots. Most drivers will need to +implement ``keyslot_program`` and ``keyslot_evict``. For details, see the +comments for ``struct blk_crypto_ll_ops``. -``blk_ksm_reprogram_all_keys`` can be called by device drivers if the device -needs each and every of its keyslots to be reprogrammed with the key it -"should have" at the point in time when the function is called. This is useful -e.g. if a device loses all its keys on runtime power down/up. +Once the driver registers a blk_crypto_profile with a request_queue, I/O +requests the driver receives via that queue may have an encryption context. All +encryption contexts will be compatible with the crypto capabilities declared in +the blk_crypto_profile, so drivers don't need to worry about handling +unsupported requests. Also, if a nonzero number of keyslots was declared in the +blk_crypto_profile, then all I/O requests that have an encryption context will +also have a keyslot which was already programmed with the appropriate key. -If the driver used ``blk_ksm_init`` instead of ``devm_blk_ksm_init``, then -``blk_ksm_destroy`` should be called to free up all resources used by a -``blk_keyslot_manager`` once it is no longer needed. +If the driver implements runtime suspend and its blk_crypto_ll_ops don't work +while the device is runtime-suspended, then the driver must also set the ``dev`` +field of the blk_crypto_profile to point to the ``struct device`` that will be +resumed before any of the low-level operations are called. 
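Putting the driver-side steps above together, a minimal setup sketch might look like the following, modeled on the cqhci and ufshcd conversions earlier in this series. The ``myhw_*`` names, the keyslot count, and the advertised capabilities are hypothetical placeholders rather than a real driver:

#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>

static int myhw_keyslot_program(struct blk_crypto_profile *profile,
				const struct blk_crypto_key *key,
				unsigned int slot)
{
	/* Write the raw key bytes into hardware keyslot @slot here. */
	return 0;	/* or -errno on failure */
}

static int myhw_keyslot_evict(struct blk_crypto_profile *profile,
			      const struct blk_crypto_key *key,
			      unsigned int slot)
{
	/* Clear hardware keyslot @slot here. */
	return 0;
}

static const struct blk_crypto_ll_ops myhw_crypto_ops = {
	.keyslot_program	= myhw_keyslot_program,
	.keyslot_evict		= myhw_keyslot_evict,
};

static int myhw_init_crypto(struct device *dev,
			    struct blk_crypto_profile *profile,
			    struct request_queue *q)
{
	int err;

	err = devm_blk_crypto_profile_init(dev, profile, 32 /* keyslots */);
	if (err)
		return err;

	profile->ll_ops = myhw_crypto_ops;
	profile->dev = dev;			/* only needed for runtime PM */
	profile->max_dun_bytes_supported = 8;	/* hypothetical hardware limit */
	/* Advertise AES-256-XTS with a 4096-byte data unit size (1 << 12). */
	profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 4096;

	/* Expose the capabilities on the disk's request_queue. */
	blk_crypto_register(profile, q);
	return 0;
}

A real driver would instead derive the keyslot count and the capability bitmasks from its hardware's capability registers, as the cqhci and ufshcd code earlier in this series does.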
+ +If there are situations where the inline encryption hardware loses the contents +of its keyslots, e.g. device resets, the driver must handle reprogramming the +keyslots. To do this, the driver may call ``blk_crypto_reprogram_all_keys()``. + +Finally, if the driver used ``blk_crypto_profile_init()`` instead of +``devm_blk_crypto_profile_init()``, then it is responsible for calling +``blk_crypto_profile_destroy()`` when the crypto profile is no longer needed. Layered Devices =============== -Request queue based layered devices like dm-rq that wish to support IE need to -create their own keyslot manager for their request queue, and expose whatever -functionality they choose. When a layered device wants to pass a clone of that -request to another ``request_queue``, blk-crypto will initialize and prepare the -clone as necessary - see ``blk_crypto_insert_cloned_request`` in -``blk-crypto.c``. - - -Future Optimizations for layered devices -======================================== - -Creating a keyslot manager for a layered device uses up memory for each -keyslot, and in general, a layered device merely passes the request on to a -"child" device, so the keyslots in the layered device itself are completely -unused, and don't need any refcounting or keyslot programming. We can instead -define a new type of KSM; the "passthrough KSM", that layered devices can use -to advertise an unlimited number of keyslots, and support for any encryption -algorithms they choose, while not actually using any memory for each keyslot. -Another use case for the "passthrough KSM" is for IE devices that do not have a -limited number of keyslots. - +Request queue based layered devices like dm-rq that wish to support inline +encryption need to create their own blk_crypto_profile for their request_queue, +and expose whatever functionality they choose. When a layered device wants to +pass a clone of that request to another request_queue, blk-crypto will +initialize and prepare the clone as necessary; see +``blk_crypto_insert_cloned_request()``. Interaction between inline encryption and blk integrity ======================================================= @@ -257,7 +296,7 @@ Because there isn't any real hardware yet, it seems prudent to assume that hardware implementations might not implement both features together correctly, and disallow the combination for now. Whenever a device supports integrity, the kernel will pretend that the device does not support hardware inline encryption -(by essentially setting the keyslot manager in the request_queue of the device -to NULL). When the crypto API fallback is enabled, this means that all bios with -and encryption context will use the fallback, and IO will complete as usual. -When the fallback is disabled, a bio with an encryption context will be failed. +(by setting the blk_crypto_profile in the request_queue of the device to NULL). +When the crypto API fallback is enabled, this means that all bios with an +encryption context will use the fallback, and IO will complete as usual. When +the fallback is disabled, a bio with an encryption context will be failed. From 297db731847e7808881ec2123c7564067d594d39 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 22 Oct 2021 16:01:44 +0100 Subject: [PATCH 152/174] block: fix req_bio_endio append error handling Shinichiro Kawasaki reports that there is a bug in a recent req_bio_endio() patch causing problems with zonefs.
As Shinichiro suggested, invert the condition in the zone append path to resemble how it was before: fail when it's not fully completed. Fixes: 478eb72b815f3 ("block: optimise req_bio_endio()") Reported-by: Shinichiro Kawasaki Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/344ea4e334aace9148b41af5f2426da38c8aa65a.1634914228.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index d04ee72ba125..c19dfa8ea65e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -640,7 +640,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio, * Partial zone append completions cannot be supported as the * BIO fragments may end up not being written sequentially. */ - if (bio->bi_iter.bi_size == nbytes) + if (bio->bi_iter.bi_size != nbytes) bio->bi_status = BLK_STS_IOERR; else bio->bi_iter.bi_sector = rq->__sector; From 8bdf7b3fe1f48a2c1c212d4685903bba01409c0e Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 22 Oct 2021 16:12:20 +0800 Subject: [PATCH 153/174] blk-mq-sched: Don't reference queue tagset in blk_mq_sched_tags_teardown() We should not reference the queue tagset in blk_mq_sched_tags_teardown() (see function comment) for the blk-mq flags, so use the passed flags instead. This solves a use-after-free, similarly fixed earlier (and since broken again) in commit f0c1c4d2864e ("blk-mq: fix use-after-free in blk_mq_exit_sched"). Reported-by: Linux Kernel Functional Testing Reported-by: Naresh Kamboju Tested-by: Anders Roxell Fixes: e155b0c238b2 ("blk-mq: Use shared tags for shared sbitmap support") Signed-off-by: John Garry Link: https://lore.kernel.org/r/1634890340-15432-1-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 5b259fdea794..c62b966dfaba 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -541,7 +541,7 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int fla queue_for_each_hw_ctx(q, hctx, i) { if (hctx->sched_tags) { - if (!blk_mq_is_shared_tags(q->tag_set->flags)) + if (!blk_mq_is_shared_tags(flags)) blk_mq_free_rq_map(hctx->sched_tags); hctx->sched_tags = NULL; } From 599593a82fc57f5e9453c8ef7420df3206934a0c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 22 Oct 2021 19:35:45 -0600 Subject: [PATCH 154/174] sched: make task_struct->plug always defined MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If CONFIG_BLOCK isn't set, then it's an empty struct anyway.
Just make it generally available, so we don't break the compile: kernel/sched/core.c: In function ‘sched_submit_work’: kernel/sched/core.c:6346:35: error: ‘struct task_struct’ has no member named ‘plug’ 6346 | blk_flush_plug(tsk->plug, true); | ^~ kernel/sched/core.c: In function ‘io_schedule_prepare’: kernel/sched/core.c:8357:20: error: ‘struct task_struct’ has no member named ‘plug’ 8357 | if (current->plug) | ^~ kernel/sched/core.c:8358:39: error: ‘struct task_struct’ has no member named ‘plug’ 8358 | blk_flush_plug(current->plug, true); | ^~ Reported-by: Nathan Chancellor Fixes: 008f75a20e70 ("block: cleanup the flush plug helpers") Signed-off-by: Jens Axboe --- include/linux/sched.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index c1a927ddec64..e0454e60fe8f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1160,10 +1160,8 @@ struct task_struct { /* Stacked block device info: */ struct bio_list *bio_list; -#ifdef CONFIG_BLOCK /* Stack plugging: */ struct blk_plug *plug; -#endif /* VM state: */ struct reclaim_state *reclaim_state; From 54a88eb838d37af930c9f19e1930a4fba6789cb5 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 17:21:32 +0100 Subject: [PATCH 155/174] block: add single bio async direct IO helper As with __blkdev_direct_IO_simple(), we can implement direct IO more efficiently if there is only one bio. Add __blkdev_direct_IO_async() and blkdev_bio_end_io_async(). This patch brings me from 4.45-4.5 MIOPS with nullblk to 4.7+. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f0ae4109b7a6934adede490f84d188d53b97051b.1635006010.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 3 deletions(-) diff --git a/block/fops.c b/block/fops.c index 396537598e3e..a7b328296912 100644 --- a/block/fops.c +++ b/block/fops.c @@ -305,6 +305,85 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, return ret; } +static void blkdev_bio_end_io_async(struct bio *bio) +{ + struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio); + struct kiocb *iocb = dio->iocb; + ssize_t ret; + + if (likely(!bio->bi_status)) { + ret = dio->size; + iocb->ki_pos += ret; + } else { + ret = blk_status_to_errno(bio->bi_status); + } + + iocb->ki_complete(iocb, ret, 0); + + if (dio->flags & DIO_SHOULD_DIRTY) { + bio_check_pages_dirty(bio); + } else { + bio_release_pages(bio, false); + bio_put(bio); + } +} + +static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, + struct iov_iter *iter, + unsigned int nr_pages) +{ + struct block_device *bdev = iocb->ki_filp->private_data; + struct blkdev_dio *dio; + struct bio *bio; + loff_t pos = iocb->ki_pos; + int ret = 0; + + if ((pos | iov_iter_alignment(iter)) & + (bdev_logical_block_size(bdev) - 1)) + return -EINVAL; + + bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool); + dio = container_of(bio, struct blkdev_dio, bio); + dio->flags = 0; + dio->iocb = iocb; + bio_set_dev(bio, bdev); + bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT; + bio->bi_write_hint = iocb->ki_hint; + bio->bi_end_io = blkdev_bio_end_io_async; + bio->bi_ioprio = iocb->ki_ioprio; + + ret = bio_iov_iter_get_pages(bio, iter); + if (unlikely(ret)) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + return ret; + } + dio->size = bio->bi_iter.bi_size; + + if (iov_iter_rw(iter) == READ) { + bio->bi_opf = REQ_OP_READ; + if (iter_is_iovec(iter)) { + dio->flags |= 
DIO_SHOULD_DIRTY; + bio_set_pages_dirty(bio); + } + } else { + bio->bi_opf = dio_bio_write_op(iocb); + task_io_account_write(bio->bi_iter.bi_size); + } + + if (iocb->ki_flags & IOCB_NOWAIT) + bio->bi_opf |= REQ_NOWAIT; + + if (iocb->ki_flags & IOCB_HIPRI) { + bio_set_polled(bio, iocb); + submit_bio(bio); + WRITE_ONCE(iocb->private, bio); + } else { + submit_bio(bio); + } + return -EIOCBQUEUED; +} + static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { unsigned int nr_pages; @@ -313,9 +392,11 @@ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter) return 0; nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1); - if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS) - return __blkdev_direct_IO_simple(iocb, iter, nr_pages); - + if (likely(nr_pages <= BIO_MAX_VECS)) { + if (is_sync_kiocb(iocb)) + return __blkdev_direct_IO_simple(iocb, iter, nr_pages); + return __blkdev_direct_IO_async(iocb, iter, nr_pages); + } return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages)); } From fa5fa8ec6077a954e1aeb206fece15255b19e7b9 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 17:21:33 +0100 Subject: [PATCH 156/174] block: refactor bio_iov_bvec_set() Combine bio_iov_bvec_set() and bio_iov_bvec_set_append() and let the caller do iov_iter_advance(). Also get rid of __bio_iov_bvec_set(), which was duplicated in the final binary, and replace a weird iov_iter_truncate() of a temporary iter copy with min(), which better reflects the intention. Signed-off-by: Pavel Begunkov Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/bcf1ac36fce769a514e19475f3623cd86a1d8b72.1635006010.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bio.c | 37 ++++++++++++++----------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/block/bio.c b/block/bio.c index 46a87c72d2b4..ead1f8a9ff5e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1046,36 +1046,27 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) } EXPORT_SYMBOL_GPL(__bio_release_pages); -static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) +static void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) { + size_t size = iov_iter_count(iter); + WARN_ON_ONCE(bio->bi_max_vecs); + if (bio_op(bio) == REQ_OP_ZONE_APPEND) { + struct request_queue *q = bdev_get_queue(bio->bi_bdev); + size_t max_sectors = queue_max_zone_append_sectors(q); + + size = min(size, max_sectors << SECTOR_SHIFT); + } + bio->bi_vcnt = iter->nr_segs; bio->bi_io_vec = (struct bio_vec *)iter->bvec; bio->bi_iter.bi_bvec_done = iter->iov_offset; - bio->bi_iter.bi_size = iter->count; + bio->bi_iter.bi_size = size; bio_set_flag(bio, BIO_NO_PAGE_REF); bio_set_flag(bio, BIO_CLONED); } -static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) -{ - __bio_iov_bvec_set(bio, iter); - iov_iter_advance(iter, iter->count); - return 0; -} - -static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter) -{ - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - struct iov_iter i = *iter; - - iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9); - __bio_iov_bvec_set(bio, &i); - iov_iter_advance(iter, i.count); - return 0; -} - static void bio_put_pages(struct page **pages, size_t size, size_t off) { size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE); @@ -1217,9 +1208,9 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) int ret = 0; if (iov_iter_is_bvec(iter)) { - if (bio_op(bio) == REQ_OP_ZONE_APPEND) - return
bio_iov_bvec_set_append(bio, iter); - return bio_iov_bvec_set(bio, iter); + bio_iov_bvec_set(bio, iter); + iov_iter_advance(iter, bio->bi_iter.bi_size); + return 0; } do { From 0c9d338c8443b06da8e8d3bfce824c5ea6d3488f Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Wed, 20 Oct 2021 09:40:36 +0800 Subject: [PATCH 157/174] blk-cgroup: synchronize blkg creation against policy deactivation Our test reports a null pointer dereference: [ 168.534653] ================================================================== [ 168.535614] Disabling lock debugging due to kernel taint [ 168.536346] BUG: kernel NULL pointer dereference, address: 0000000000000008 [ 168.537274] #PF: supervisor read access in kernel mode [ 168.537964] #PF: error_code(0x0000) - not-present page [ 168.538667] PGD 0 P4D 0 [ 168.539025] Oops: 0000 [#1] PREEMPT SMP KASAN [ 168.539656] CPU: 13 PID: 759 Comm: bash Tainted: G B 5.15.0-rc2-next-202100 [ 168.540954] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20190727_0738364 [ 168.542736] RIP: 0010:bfq_pd_init+0x88/0x1e0 [ 168.543318] Code: 98 00 00 00 e8 c9 e4 5b ff 4c 8b 65 00 49 8d 7c 24 08 e8 bb e4 5b ff 4d0 [ 168.545803] RSP: 0018:ffff88817095f9c0 EFLAGS: 00010002 [ 168.546497] RAX: 0000000000000001 RBX: ffff888101a1c000 RCX: 0000000000000000 [ 168.547438] RDX: 0000000000000003 RSI: 0000000000000002 RDI: ffff888106553428 [ 168.548402] RBP: ffff888106553400 R08: ffffffff961bcaf4 R09: 0000000000000001 [ 168.549365] R10: ffffffffa2e16c27 R11: fffffbfff45c2d84 R12: 0000000000000000 [ 168.550291] R13: ffff888101a1c098 R14: ffff88810c7a08c8 R15: ffffffffa55541a0 [ 168.551221] FS: 00007fac75227700(0000) GS:ffff88839ba80000(0000) knlGS:0000000000000000 [ 168.552278] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 168.553040] CR2: 0000000000000008 CR3: 0000000165ce7000 CR4: 00000000000006e0 [ 168.554000] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 168.554929] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 168.555888] Call Trace: [ 168.556221] [ 168.556510] blkg_create+0x1c0/0x8c0 [ 168.556989] blkg_conf_prep+0x574/0x650 [ 168.557502] ? stack_trace_save+0x99/0xd0 [ 168.558033] ? blkcg_conf_open_bdev+0x1b0/0x1b0 [ 168.558629] tg_set_conf.constprop.0+0xb9/0x280 [ 168.559231] ? kasan_set_track+0x29/0x40 [ 168.559758] ? kasan_set_free_info+0x30/0x60 [ 168.560344] ? tg_set_limit+0xae0/0xae0 [ 168.560853] ? do_sys_openat2+0x33b/0x640 [ 168.561383] ? do_sys_open+0xa2/0x100 [ 168.561877] ? __x64_sys_open+0x4e/0x60 [ 168.562383] ? __kasan_check_write+0x20/0x30 [ 168.562951] ? copyin+0x48/0x70 [ 168.563390] ? _copy_from_iter+0x234/0x9e0 [ 168.563948] tg_set_conf_u64+0x17/0x20 [ 168.564467] cgroup_file_write+0x1ad/0x380 [ 168.565014] ? cgroup_file_poll+0x80/0x80 [ 168.565568] ? __mutex_lock_slowpath+0x30/0x30 [ 168.566165] ? pgd_free+0x100/0x160 [ 168.566649] kernfs_fop_write_iter+0x21d/0x340 [ 168.567246] ? cgroup_file_poll+0x80/0x80 [ 168.567796] new_sync_write+0x29f/0x3c0 [ 168.568314] ? new_sync_read+0x410/0x410 [ 168.568840] ? __handle_mm_fault+0x1c97/0x2d80 [ 168.569425] ? copy_page_range+0x2b10/0x2b10 [ 168.570007] ? _raw_read_lock_bh+0xa0/0xa0 [ 168.570622] vfs_write+0x46e/0x630 [ 168.571091] ksys_write+0xcd/0x1e0 [ 168.571563] ? __x64_sys_read+0x60/0x60 [ 168.572081] ? __kasan_check_write+0x20/0x30 [ 168.572659] ? 
do_user_addr_fault+0x446/0xff0 [ 168.573264] __x64_sys_write+0x46/0x60 [ 168.573774] do_syscall_64+0x35/0x80 [ 168.574264] entry_SYSCALL_64_after_hwframe+0x44/0xae [ 168.574960] RIP: 0033:0x7fac74915130 [ 168.575456] Code: 73 01 c3 48 8b 0d 58 ed 2c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 444 [ 168.577969] RSP: 002b:00007ffc3080e288 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 168.578986] RAX: ffffffffffffffda RBX: 0000000000000009 RCX: 00007fac74915130 [ 168.579937] RDX: 0000000000000009 RSI: 000056007669f080 RDI: 0000000000000001 [ 168.580884] RBP: 000056007669f080 R08: 000000000000000a R09: 00007fac75227700 [ 168.581841] R10: 000056007655c8f0 R11: 0000000000000246 R12: 0000000000000009 [ 168.582796] R13: 0000000000000001 R14: 00007fac74be55e0 R15: 00007fac74be08c0 [ 168.583757] [ 168.584063] Modules linked in: [ 168.584494] CR2: 0000000000000008 [ 168.584964] ---[ end trace 2475611ad0f77a1a ]--- This is because blkg_alloc() is called from blkg_conf_prep() without holding 'q->queue_lock', and elevator is exited before blkg_create(): thread 1 thread 2 blkg_conf_prep spin_lock_irq(&q->queue_lock); blkg_lookup_check -> return NULL spin_unlock_irq(&q->queue_lock); blkg_alloc blkcg_policy_enabled -> true pd = ->pd_alloc_fn blkg->pd[i] = pd blk_mq_exit_sched bfq_exit_queue blkcg_deactivate_policy spin_lock_irq(&q->queue_lock); __clear_bit(pol->plid, q->blkcg_pols); spin_unlock_irq(&q->queue_lock); q->elevator = NULL; spin_lock_irq(&q->queue_lock); blkg_create if (blkg->pd[i]) ->pd_init_fn -> q->elevator is NULL spin_unlock_irq(&q->queue_lock); Because blkcg_deactivate_policy() requires the queue to be frozen, we can grab q_usage_counter to synchronize blkg_conf_prep() against blkcg_deactivate_policy(). Fixes: e21b7a0b9887 ("block, bfq: add full hierarchical scheduling and cgroups support") Signed-off-by: Yu Kuai Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20211020014036.2141723-1-yukuai3@huawei.com Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 8908298d6ad3..88b1fce90520 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -634,6 +634,14 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, q = bdev_get_queue(bdev); + /* + * blkcg_deactivate_policy() requires the queue to be frozen; we grab + * q_usage_counter to prevent racing with blkcg_deactivate_policy(). + */ + ret = blk_queue_enter(q, 0); + if (ret) + return ret; + rcu_read_lock(); spin_lock_irq(&q->queue_lock); @@ -703,6 +711,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, goto success; } success: + blk_queue_exit(q); ctx->bdev = bdev; ctx->blkg = blkg; ctx->body = input; @@ -715,6 +724,7 @@ fail_unlock: rcu_read_unlock(); fail: blkdev_put_no_open(bdev); + blk_queue_exit(q); /* * If queue was bypassing, we should retry. Do so after a * short msleep().
It isn't strictly necessary but queue can be bypassing for some time and it's always nice to avoid busy looping. */

From 9f8b93a7df4d8e1e8715fb2a45a893cffad9da0b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 25 Oct 2021 10:45:01 -0600 Subject: [PATCH 158/174] sbitmap: silence data race warning

KCSAN complains about the sbitmap hint update: ================================================================== BUG: KCSAN: data-race in sbitmap_queue_clear / sbitmap_queue_clear write to 0xffffe8ffffd145b8 of 4 bytes by interrupt on cpu 1: sbitmap_queue_clear+0xca/0xf0 lib/sbitmap.c:606 blk_mq_put_tag+0x82/0x90 __blk_mq_free_request+0x114/0x180 block/blk-mq.c:507 blk_mq_free_request+0x2c8/0x340 block/blk-mq.c:541 __blk_mq_end_request+0x214/0x230 block/blk-mq.c:565 blk_mq_end_request+0x37/0x50 block/blk-mq.c:574 lo_complete_rq+0xca/0x170 drivers/block/loop.c:541 blk_complete_reqs block/blk-mq.c:584 [inline] blk_done_softirq+0x69/0x90 block/blk-mq.c:589 __do_softirq+0x12c/0x26e kernel/softirq.c:558 run_ksoftirqd+0x13/0x20 kernel/softirq.c:920 smpboot_thread_fn+0x22f/0x330 kernel/smpboot.c:164 kthread+0x262/0x280 kernel/kthread.c:319 ret_from_fork+0x1f/0x30 write to 0xffffe8ffffd145b8 of 4 bytes by interrupt on cpu 0: sbitmap_queue_clear+0xca/0xf0 lib/sbitmap.c:606 blk_mq_put_tag+0x82/0x90 __blk_mq_free_request+0x114/0x180 block/blk-mq.c:507 blk_mq_free_request+0x2c8/0x340 block/blk-mq.c:541 __blk_mq_end_request+0x214/0x230 block/blk-mq.c:565 blk_mq_end_request+0x37/0x50 block/blk-mq.c:574 lo_complete_rq+0xca/0x170 drivers/block/loop.c:541 blk_complete_reqs block/blk-mq.c:584 [inline] blk_done_softirq+0x69/0x90 block/blk-mq.c:589 __do_softirq+0x12c/0x26e kernel/softirq.c:558 run_ksoftirqd+0x13/0x20 kernel/softirq.c:920 smpboot_thread_fn+0x22f/0x330 kernel/smpboot.c:164 kthread+0x262/0x280 kernel/kthread.c:319 ret_from_fork+0x1f/0x30 value changed: 0x00000035 -> 0x00000044 Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 10 Comm: ksoftirqd/0 Not tainted 5.15.0-rc6-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 ==================================================================

which is a data race, but not an important one. This is just updating the percpu alloc hint, and the reader of that hint doesn't ever require it to be valid. Just annotate it with data_race() to silence this one. Reported-by: syzbot+4f8bfd804b4a1f95b8f6@syzkaller.appspotmail.com Acked-by: Marco Elver Signed-off-by: Jens Axboe --- lib/sbitmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c index c6e2f1f2c4d2..2709ab825499 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -631,7 +631,7 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag) { if (likely(!sb->round_robin && tag < sb->depth)) - *per_cpu_ptr(sb->alloc_hint, cpu) = tag; + data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag); } void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,

From ff1552232b3612edff43a95746a4e78e231ef3d4 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 26 Oct 2021 16:22:57 +0800 Subject: [PATCH 159/174] blk-mq: don't issue request directly in case that current is to be blocked

When flushing the plug list while the current task is about to be blocked, we can't issue requests directly, because ->queue_rq() may sleep; otherwise the scheduler may complain.
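As a contrived illustration of the constraint (hypothetical driver code, not part of this patch): with BLK_MQ_F_BLOCKING, a driver's ->queue_rq() is allowed to sleep, which is exactly what must not happen on the path where the submitting task is already committed to blocking.

	/* Hypothetical BLK_MQ_F_BLOCKING driver; all names invented. */
	struct demo_cmd { u8 payload[64]; };

	static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
					  const struct blk_mq_queue_data *bd)
	{
		struct demo_cmd *cmd;

		blk_mq_start_request(bd->rq);

		/*
		 * GFP_KERNEL may sleep. That is legal for a BLK_MQ_F_BLOCKING
		 * driver, but not if we were called via direct issue from the
		 * schedule path, where the task must not sleep again.
		 */
		cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			return BLK_STS_RESOURCE;

		/* A real driver would hand cmd to hardware and complete later. */
		kfree(cmd);
		blk_mq_end_request(bd->rq, BLK_STS_OK);
		return BLK_STS_OK;
	}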
Fixes: dc5fc361d891 ("block: attempt direct issue of plug list") Signed-off-by: Ming Lei Link: https://lore.kernel.org/r/20211026082257.2889890-1-ming.lei@redhat.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index c19dfa8ea65e..9840b15f505b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2223,7 +2223,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) return; plug->rq_count = 0; - if (!plug->multiple_queues && !plug->has_elevator) { + if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { blk_mq_plug_issue_direct(plug, from_schedule); if (rq_list_empty(plug->mq_list)) return;

From a2247f19ee1c5ad75ef095cdfb909a3244b88aa8 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Wed, 27 Oct 2021 11:22:19 +0900 Subject: [PATCH 160/174] block: Add independent access ranges support

The Concurrent Positioning Ranges VPD page (for SCSI) and data log page (for ATA) contain parameters describing the set of contiguous LBAs that can be served independently by a single-LUN multi-actuator hard disk. Similarly, a logically defined block device composed of multiple disks can in some cases execute requests directed at different sector ranges in parallel. A dm-linear device aggregating 2 block devices together is an example. This patch implements support for exposing a block device's independent access ranges to the user through sysfs, to allow optimizing device accesses for increased performance. To describe the set of independent sector ranges of a device (actuators of a multi-actuator HDD or table entries of a dm-linear device), the type struct blk_independent_access_ranges is introduced. This structure describes the sector ranges using an array of struct blk_independent_access_range structures. This range structure defines the start sector and number of sectors of the access range. The ranges in the array cannot overlap and must contain all sectors within the device capacity. The function disk_set_independent_access_ranges() allows a device driver to signal to the block layer that a device has multiple independent access ranges. In this case, this function attaches a struct blk_independent_access_ranges to the device request queue. The function disk_alloc_independent_access_ranges() is provided for drivers to allocate this structure. struct blk_independent_access_ranges contains kobjects (struct kobject) to expose to the user through sysfs the set of independent access ranges supported by a device. When the device is initialized, sysfs registration of the ranges information is done from blk_register_queue() using the block layer internal function disk_register_independent_access_ranges(). If a driver calls disk_set_independent_access_ranges() for a registered queue, e.g. when a device is revalidated, disk_set_independent_access_ranges() will execute disk_register_independent_access_ranges() to update the sysfs attribute files. The sysfs file structure created starts from the independent_access_ranges sub-directory and contains the start sector and number of sectors of each range, with the information for each range grouped in numbered sub-directories. E.g.
for a dual actuator HDD, the user sees:

$ tree /sys/block/sdk/queue/independent_access_ranges/
/sys/block/sdk/queue/independent_access_ranges/
|-- 0
|   |-- nr_sectors
|   `-- sector
`-- 1
    |-- nr_sectors
    `-- sector

For a regular device with a single access range, the independent_access_ranges sysfs directory does not exist. Device revalidation may lead to changes to this structure and to the attribute values. When manipulated, the queue sysfs_lock and sysfs_dir_lock mutexes are held for atomicity, similarly to how the blk-mq and elevator sysfs queue sub-directories are protected. The code related to the management of independent access ranges is added in the new file block/blk-ia-ranges.c. Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Martin K. Petersen Reviewed-by: Keith Busch Link: https://lore.kernel.org/r/20211027022223.183838-2-damien.lemoal@wdc.com Signed-off-by: Jens Axboe --- block/Makefile | 2 +- block/blk-ia-ranges.c | 348 +++++++++++++++++++++++++++++++++++++++++ block/blk-sysfs.c | 26 ++- block/blk.h | 4 + include/linux/blkdev.h | 39 +++++ 5 files changed, 410 insertions(+), 9 deletions(-) create mode 100644 block/blk-ia-ranges.c
diff --git a/block/Makefile b/block/Makefile index 602f7f47b7b6..44df57e562bf 100644 --- a/block/Makefile +++ b/block/Makefile @@ -9,7 +9,7 @@ obj-y := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \ blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \ genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o \ - disk-events.o + disk-events.o blk-ia-ranges.o obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_BLK_DEV_BSG_COMMON) += bsg.o
diff --git a/block/blk-ia-ranges.c b/block/blk-ia-ranges.c new file mode 100644 index 000000000000..c246c425d0d7 --- /dev/null +++ b/block/blk-ia-ranges.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Block device concurrent positioning ranges. + * + * Copyright (C) 2021 Western Digital Corporation or its Affiliates.
+ */ +#include +#include +#include +#include + +#include "blk.h" + +static ssize_t +blk_ia_range_sector_show(struct blk_independent_access_range *iar, + char *buf) +{ + return sprintf(buf, "%llu\n", iar->sector); +} + +static ssize_t +blk_ia_range_nr_sectors_show(struct blk_independent_access_range *iar, + char *buf) +{ + return sprintf(buf, "%llu\n", iar->nr_sectors); +} + +struct blk_ia_range_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_independent_access_range *iar, char *buf); +}; + +static struct blk_ia_range_sysfs_entry blk_ia_range_sector_entry = { + .attr = { .name = "sector", .mode = 0444 }, + .show = blk_ia_range_sector_show, +}; + +static struct blk_ia_range_sysfs_entry blk_ia_range_nr_sectors_entry = { + .attr = { .name = "nr_sectors", .mode = 0444 }, + .show = blk_ia_range_nr_sectors_show, +}; + +static struct attribute *blk_ia_range_attrs[] = { + &blk_ia_range_sector_entry.attr, + &blk_ia_range_nr_sectors_entry.attr, + NULL, +}; +ATTRIBUTE_GROUPS(blk_ia_range); + +static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct blk_ia_range_sysfs_entry *entry = + container_of(attr, struct blk_ia_range_sysfs_entry, attr); + struct blk_independent_access_range *iar = + container_of(kobj, struct blk_independent_access_range, kobj); + ssize_t ret; + + mutex_lock(&iar->queue->sysfs_lock); + ret = entry->show(iar, buf); + mutex_unlock(&iar->queue->sysfs_lock); + + return ret; +} + +static const struct sysfs_ops blk_ia_range_sysfs_ops = { + .show = blk_ia_range_sysfs_show, +}; + +/* + * Independent access range entries are not freed individually, but altogether + * with struct blk_independent_access_ranges and its array of ranges. Since + * kobject_add() takes a reference on the parent kobject contained in + * struct blk_independent_access_ranges, the array of independent access range + * entries cannot be freed until kobject_del() is called for all entries. + * So we do not need to do anything here, but still need this no-op release + * operation to avoid complaints from the kobject code. + */ +static void blk_ia_range_sysfs_nop_release(struct kobject *kobj) +{ +} + +static struct kobj_type blk_ia_range_ktype = { + .sysfs_ops = &blk_ia_range_sysfs_ops, + .default_groups = blk_ia_range_groups, + .release = blk_ia_range_sysfs_nop_release, +}; + +/* + * This will be executed only after all independent access range entries are + * removed with kobject_del(), at which point, it is safe to free everything, + * including the array of ranges. + */ +static void blk_ia_ranges_sysfs_release(struct kobject *kobj) +{ + struct blk_independent_access_ranges *iars = + container_of(kobj, struct blk_independent_access_ranges, kobj); + + kfree(iars); +} + +static struct kobj_type blk_ia_ranges_ktype = { + .release = blk_ia_ranges_sysfs_release, +}; + +/** + * disk_register_independent_access_ranges - register with sysfs a set of + * independent access ranges + * @disk: Target disk + * @new_iars: New set of independent access ranges + * + * Register with sysfs a set of independent access ranges for @disk. + * If @new_iars is not NULL, this set of ranges is registered and the old set + * specified by q->ia_ranges is unregistered. Otherwise, q->ia_ranges is + * registered if it is not already.
+ */ +int disk_register_independent_access_ranges(struct gendisk *disk, + struct blk_independent_access_ranges *new_iars) +{ + struct request_queue *q = disk->queue; + struct blk_independent_access_ranges *iars; + int i, ret; + + lockdep_assert_held(&q->sysfs_dir_lock); + lockdep_assert_held(&q->sysfs_lock); + + /* If a new range set is specified, unregister the old one */ + if (new_iars) { + if (q->ia_ranges) + disk_unregister_independent_access_ranges(disk); + q->ia_ranges = new_iars; + } + + iars = q->ia_ranges; + if (!iars) + return 0; + + /* + * At this point, iars is the new set of sector access ranges that needs + * to be registered with sysfs. + */ + WARN_ON(iars->sysfs_registered); + ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype, + &q->kobj, "%s", "independent_access_ranges"); + if (ret) { + q->ia_ranges = NULL; + kfree(iars); + return ret; + } + + for (i = 0; i < iars->nr_ia_ranges; i++) { + iars->ia_range[i].queue = q; + ret = kobject_init_and_add(&iars->ia_range[i].kobj, + &blk_ia_range_ktype, &iars->kobj, + "%d", i); + if (ret) { + while (--i >= 0) + kobject_del(&iars->ia_range[i].kobj); + kobject_del(&iars->kobj); + kobject_put(&iars->kobj); + return ret; + } + } + + iars->sysfs_registered = true; + + return 0; +} + +void disk_unregister_independent_access_ranges(struct gendisk *disk) +{ + struct request_queue *q = disk->queue; + struct blk_independent_access_ranges *iars = q->ia_ranges; + int i; + + lockdep_assert_held(&q->sysfs_dir_lock); + lockdep_assert_held(&q->sysfs_lock); + + if (!iars) + return; + + if (iars->sysfs_registered) { + for (i = 0; i < iars->nr_ia_ranges; i++) + kobject_del(&iars->ia_range[i].kobj); + kobject_del(&iars->kobj); + kobject_put(&iars->kobj); + } else { + kfree(iars); + } + + q->ia_ranges = NULL; +} + +static struct blk_independent_access_range * +disk_find_ia_range(struct blk_independent_access_ranges *iars, + sector_t sector) +{ + struct blk_independent_access_range *iar; + int i; + + for (i = 0; i < iars->nr_ia_ranges; i++) { + iar = &iars->ia_range[i]; + if (sector >= iar->sector && + sector < iar->sector + iar->nr_sectors) + return iar; + } + + return NULL; +} + +static bool disk_check_ia_ranges(struct gendisk *disk, + struct blk_independent_access_ranges *iars) +{ + struct blk_independent_access_range *iar, *tmp; + sector_t capacity = get_capacity(disk); + sector_t sector = 0; + int i; + + /* + * While sorting the ranges in increasing LBA order, check that the + * ranges do not overlap, that there are no sector holes and that all + * sectors belong to one range. 
+ */ + for (i = 0; i < iars->nr_ia_ranges; i++) { + tmp = disk_find_ia_range(iars, sector); + if (!tmp || tmp->sector != sector) { + pr_warn("Invalid non-contiguous independent access ranges\n"); + return false; + } + + iar = &iars->ia_range[i]; + if (tmp != iar) { + swap(iar->sector, tmp->sector); + swap(iar->nr_sectors, tmp->nr_sectors); + } + + sector += iar->nr_sectors; + } + + if (sector != capacity) { + pr_warn("Independent access ranges do not match disk capacity\n"); + return false; + } + + return true; +} + +static bool disk_ia_ranges_changed(struct gendisk *disk, + struct blk_independent_access_ranges *new) +{ + struct blk_independent_access_ranges *old = disk->queue->ia_ranges; + int i; + + if (!old) + return true; + + if (old->nr_ia_ranges != new->nr_ia_ranges) + return true; + + for (i = 0; i < old->nr_ia_ranges; i++) { + if (new->ia_range[i].sector != old->ia_range[i].sector || + new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors) + return true; + } + + return false; +} + +/** + * disk_alloc_independent_access_ranges - Allocate an independent access ranges + * data structure + * @disk: target disk + * @nr_ia_ranges: Number of independent access ranges + * + * Allocate a struct blk_independent_access_ranges structure with @nr_ia_ranges + * access range descriptors. + */ +struct blk_independent_access_ranges * +disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges) +{ + struct blk_independent_access_ranges *iars; + + iars = kzalloc_node(struct_size(iars, ia_range, nr_ia_ranges), + GFP_KERNEL, disk->queue->node); + if (iars) + iars->nr_ia_ranges = nr_ia_ranges; + return iars; +} +EXPORT_SYMBOL_GPL(disk_alloc_independent_access_ranges); + +/** + * disk_set_independent_access_ranges - Set a disk's independent access ranges + * @disk: target disk + * @iars: independent access ranges structure + * + * Set the independent access ranges information of the request queue + * of @disk to @iars. If @iars is NULL, the independent access ranges + * structure already set, if any, is cleared. If there are no differences + * between @iars and the independent access ranges structure already set, + * @iars is freed. + */ +void disk_set_independent_access_ranges(struct gendisk *disk, + struct blk_independent_access_ranges *iars) +{ + struct request_queue *q = disk->queue; + + if (WARN_ON_ONCE(iars && !iars->nr_ia_ranges)) { + kfree(iars); + iars = NULL; + } + + mutex_lock(&q->sysfs_dir_lock); + mutex_lock(&q->sysfs_lock); + + if (iars) { + if (!disk_check_ia_ranges(disk, iars)) { + kfree(iars); + iars = NULL; + goto reg; + } + + if (!disk_ia_ranges_changed(disk, iars)) { + kfree(iars); + goto unlock; + } + } + + /* + * This may be called for a registered queue, e.g. during a device + * revalidation. If that is the case, we need to unregister the old + * set of independent access ranges and register the new set. If the + * queue is not registered, registration of the device request queue + * will register the independent access ranges, so only swap in the + * new set and free the old one.
+ */ +reg: + if (blk_queue_registered(q)) { + disk_register_independent_access_ranges(disk, iars); + } else { + swap(q->ia_ranges, iars); + kfree(iars); + } + +unlock: + mutex_unlock(&q->sysfs_lock); + mutex_unlock(&q->sysfs_dir_lock); +} +EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 36f14d658e81..cef1f713370b 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -873,16 +873,15 @@ int blk_register_queue(struct gendisk *disk) } mutex_lock(&q->sysfs_lock); + + ret = disk_register_independent_access_ranges(disk, NULL); + if (ret) + goto put_dev; + if (q->elevator) { ret = elv_register_queue(q, false); - if (ret) { - mutex_unlock(&q->sysfs_lock); - mutex_unlock(&q->sysfs_dir_lock); - kobject_del(&q->kobj); - blk_trace_remove_sysfs(dev); - kobject_put(&dev->kobj); - return ret; - } + if (ret) + goto put_dev; } blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); @@ -913,6 +912,16 @@ unlock: percpu_ref_switch_to_percpu(&q->q_usage_counter); } + return ret; + +put_dev: + disk_unregister_independent_access_ranges(disk); + mutex_unlock(&q->sysfs_lock); + mutex_unlock(&q->sysfs_dir_lock); + kobject_del(&q->kobj); + blk_trace_remove_sysfs(dev); + kobject_put(&dev->kobj); + return ret; } @@ -958,6 +967,7 @@ void blk_unregister_queue(struct gendisk *disk) mutex_lock(&q->sysfs_lock); if (q->elevator) elv_unregister_queue(q); + disk_unregister_independent_access_ranges(disk); mutex_unlock(&q->sysfs_lock); mutex_unlock(&q->sysfs_dir_lock); diff --git a/block/blk.h b/block/blk.h index 6a039e6c7d07..7afffd548daf 100644 --- a/block/blk.h +++ b/block/blk.h @@ -454,4 +454,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); extern const struct address_space_operations def_blk_aops; +int disk_register_independent_access_ranges(struct gendisk *disk, + struct blk_independent_access_ranges *new_iars); +void disk_unregister_independent_access_ranges(struct gendisk *disk); + #endif /* BLK_INTERNAL_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f72ccb2829db..6d95a4b36cfa 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -150,6 +150,34 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev, #endif /* CONFIG_BLK_DEV_ZONED */ +/* + * Independent access ranges: struct blk_independent_access_range describes + * a range of contiguous sectors that can be accessed using device command + * execution resources that are independent from the resources used for + * other access ranges. This is typically found with single-LUN multi-actuator + * HDDs where each access range is served by a different set of heads. + * The set of independent ranges supported by the device is defined using + * struct blk_independent_access_ranges. The independent ranges must not overlap + * and must include all sectors within the disk capacity (no sector holes + * allowed). + * For a device with multiple ranges, requests targeting sectors in different + * ranges can be executed in parallel. A request can straddle an access range + * boundary. 
+ */ +struct blk_independent_access_range { + struct kobject kobj; + struct request_queue *queue; + sector_t sector; + sector_t nr_sectors; +}; + +struct blk_independent_access_ranges { + struct kobject kobj; + bool sysfs_registered; + unsigned int nr_ia_ranges; + struct blk_independent_access_range ia_range[]; +}; + struct request_queue { struct request *last_merge; struct elevator_queue *elevator; @@ -331,6 +359,12 @@ struct request_queue { #define BLK_MAX_WRITE_HINTS 5 u64 write_hints[BLK_MAX_WRITE_HINTS]; + + /* + * Independent sector access ranges. This is always NULL for + * devices that do not have multiple independent access ranges. + */ + struct blk_independent_access_ranges *ia_ranges; }; /* Keep blk_queue_flag_name[] in sync with the definitions below */ @@ -698,6 +732,11 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); +struct blk_independent_access_ranges * +disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges); +void disk_set_independent_access_ranges(struct gendisk *disk, + struct blk_independent_access_ranges *iars); + /* * Elevator features for blk_queue_required_elevator_features: */

From 1bb6b81029456f4e2e6727c5167f43bdfc34bee5 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 27 Oct 2021 13:21:07 +0100 Subject: [PATCH 161/174] block: avoid extra iter advance with async iocb

Nobody cares about the iov iterator's state if we return -EIOCBQUEUED, so, now that we have __blkdev_direct_IO_async(), which gets pages only once, we can skip the expensive iov_iter_advance(). It accounts for around 1-2% of all CPU time spent. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a6158edfbfa2ae3bc24aed29a72f035df18fad2f.1635337135.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bio.c | 2 +- block/fops.c | 20 +++++++++++++++----- include/linux/bio.h | 1 + 3 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/block/bio.c b/block/bio.c index ead1f8a9ff5e..15ab0d6d1c06 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1046,7 +1046,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) } EXPORT_SYMBOL_GPL(__bio_release_pages); -static void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) +void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) { size_t size = iov_iter_count(iter);
diff --git a/block/fops.c b/block/fops.c index a7b328296912..092e5079e827 100644 --- a/block/fops.c +++ b/block/fops.c @@ -352,11 +352,21 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, bio->bi_end_io = blkdev_bio_end_io_async; bio->bi_ioprio = iocb->ki_ioprio; - ret = bio_iov_iter_get_pages(bio, iter); - if (unlikely(ret)) { - bio->bi_status = BLK_STS_IOERR; - bio_endio(bio); - return ret; + if (iov_iter_is_bvec(iter)) { + /* + * Users don't rely on the iterator being in any particular + * state for async I/O returning -EIOCBQUEUED, hence we can + * avoid expensive iov_iter_advance(). Bypass + * bio_iov_iter_get_pages() and set the bvec directly.
+ */ + bio_iov_bvec_set(bio, iter); + } else { + ret = bio_iov_iter_get_pages(bio, iter); + if (unlikely(ret)) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + return ret; + } } dio->size = bio->bi_iter.bi_size;
diff --git a/include/linux/bio.h b/include/linux/bio.h index c88700d1bdc3..fe6bdfbbef66 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -417,6 +417,7 @@ int bio_add_zone_append_page(struct bio *bio, struct page *page, void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); +void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter); void __bio_release_pages(struct bio *bio, bool mark_dirty); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio);

From 25d207dc22271c2232df2d610ce4be6e125d1de8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 27 Oct 2021 13:21:08 +0100 Subject: [PATCH 162/174] block: kill unused polling bits in __blkdev_direct_IO()

With the addition of __blkdev_direct_IO_async(), __blkdev_direct_IO() now serves only multi-bio I/O, which we don't poll. Now we can remove anything related to I/O polling from it. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b8c597a6b7ee612df394853bfd24726aee5b898e.1635337135.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-)
diff --git a/block/fops.c b/block/fops.c index 092e5079e827..983e993c9a4b 100644 --- a/block/fops.c +++ b/block/fops.c @@ -190,7 +190,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, struct blk_plug plug; struct blkdev_dio *dio; struct bio *bio; - bool do_poll = (iocb->ki_flags & IOCB_HIPRI); bool is_read = (iov_iter_rw(iter) == READ), is_sync; loff_t pos = iocb->ki_pos; int ret = 0; @@ -216,12 +215,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (is_read && iter_is_iovec(iter)) dio->flags |= DIO_SHOULD_DIRTY; - /* - * Don't plug for HIPRI/polled IO, as those should go straight - * to issue - */ - if (!(iocb->ki_flags & IOCB_HIPRI)) - blk_start_plug(&plug); + blk_start_plug(&plug); for (;;) { bio_set_dev(bio, bdev); @@ -254,11 +248,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS); if (!nr_pages) { - if (do_poll) - bio_set_polled(bio, iocb); submit_bio(bio); - if (do_poll) - WRITE_ONCE(iocb->private, bio); break; } if (!(dio->flags & DIO_MULTI_BIO)) { @@ -271,7 +261,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio_get(bio); dio->flags |= DIO_MULTI_BIO; atomic_set(&dio->ref, 2); - do_poll = false; } else { atomic_inc(&dio->ref); } @@ -280,8 +269,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio = bio_alloc(GFP_KERNEL, nr_pages); } - if (!(iocb->ki_flags & IOCB_HIPRI)) - blk_finish_plug(&plug); + blk_finish_plug(&plug); if (!is_sync) return -EIOCBQUEUED; @@ -290,9 +278,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(dio->waiter)) break; - - if (!do_poll || !bio_poll(bio, NULL, 0)) - blk_io_schedule(); + blk_io_schedule(); } __set_current_state(TASK_RUNNING);

From e71aa913e26543768d5acaef50abe14913c6c496 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 27 Oct 2021 13:21:09 +0100 Subject: [PATCH 163/174] block: kill
DIO_MULTI_BIO

Now __blkdev_direct_IO() serves only multi-bio I/O, so remove the now-unused single-bio refcounting optimisations. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/88eb488aae9ed4852a30f3a7132f296f56e43b80.1635337135.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/block/fops.c b/block/fops.c index 983e993c9a4b..8594852bd344 100644 --- a/block/fops.c +++ b/block/fops.c @@ -124,9 +124,8 @@ out: } enum { - DIO_MULTI_BIO = 1, - DIO_SHOULD_DIRTY = 2, - DIO_IS_SYNC = 4, + DIO_SHOULD_DIRTY = 1, + DIO_IS_SYNC = 2, }; struct blkdev_dio { @@ -150,7 +149,7 @@ static void blkdev_bio_end_io(struct bio *bio) if (bio->bi_status && !dio->bio.bi_status) dio->bio.bi_status = bio->bi_status; - if (!(dio->flags & DIO_MULTI_BIO) || atomic_dec_and_test(&dio->ref)) { + if (atomic_dec_and_test(&dio->ref)) { if (!(dio->flags & DIO_IS_SYNC)) { struct kiocb *iocb = dio->iocb; ssize_t ret; @@ -165,8 +164,7 @@ static void blkdev_bio_end_io(struct bio *bio) } dio->iocb->ki_complete(iocb, ret, 0); - if (dio->flags & DIO_MULTI_BIO) - bio_put(&dio->bio); + bio_put(&dio->bio); } else { struct task_struct *waiter = dio->waiter; @@ -201,11 +199,17 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool); dio = container_of(bio, struct blkdev_dio, bio); + atomic_set(&dio->ref, 1); + /* + * Grab an extra reference to ensure the dio structure which is embedded + * into the first bio stays around. + */ + bio_get(bio); + is_sync = is_sync_kiocb(iocb); if (is_sync) { dio->flags = DIO_IS_SYNC; dio->waiter = current; - bio_get(bio); } else { dio->flags = 0; dio->iocb = iocb; @@ -251,20 +255,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, submit_bio(bio); break; } - if (!(dio->flags & DIO_MULTI_BIO)) { - /* - * AIO needs an extra reference to ensure the dio - * structure which is embedded into the first bio - * stays around. - */ - if (!is_sync) - bio_get(bio); - dio->flags |= DIO_MULTI_BIO; - atomic_set(&dio->ref, 2); - } else { - atomic_inc(&dio->ref); - } - + atomic_inc(&dio->ref); submit_bio(bio); bio = bio_alloc(GFP_KERNEL, nr_pages); }

From 842e39b013465a279fb60348427b9309427a29de Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 27 Oct 2021 13:21:10 +0100 Subject: [PATCH 164/174] block: add async version of bio_set_polled

If we know that an iocb is async we can optimise bio_set_polled() a bit and add a new helper bio_set_polled_async().
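For reference, bio_set_polled() at this point in the series looks roughly like the sketch below (reconstructed from context; treat the exact body as an assumption). The is_sync_kiocb() check is what an async-only caller can drop: an async polled bio must always carry REQ_NOWAIT, since a submitter that blocked would never be woken by the polled completion path, while a sync submitter may block during submission.

	/* Approximate definition, for reference only; not part of this patch: */
	static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
	{
		bio->bi_opf |= REQ_POLLED;
		if (!is_sync_kiocb(kiocb))
			bio->bi_opf |= REQ_NOWAIT;
	}

The diff below inlines exactly this async case by setting REQ_POLLED | REQ_NOWAIT unconditionally.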
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8fa137885164a5d05fadcff4c3521da8d5a83d00.1635337135.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/block/fops.c b/block/fops.c index 8594852bd344..a2f492e50782 100644 --- a/block/fops.c +++ b/block/fops.c @@ -358,14 +358,13 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb, task_io_account_write(bio->bi_iter.bi_size); } - if (iocb->ki_flags & IOCB_NOWAIT) - bio->bi_opf |= REQ_NOWAIT; - if (iocb->ki_flags & IOCB_HIPRI) { - bio_set_polled(bio, iocb); + bio->bi_opf |= REQ_POLLED | REQ_NOWAIT; submit_bio(bio); WRITE_ONCE(iocb->private, bio); } else { + if (iocb->ki_flags & IOCB_NOWAIT) + bio->bi_opf |= REQ_NOWAIT; submit_bio(bio); } return -EIOCBQUEUED;

From 56f8da642bd827ef50a952e7bc3728c5830452be Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 19 Oct 2021 09:32:57 -0600 Subject: [PATCH 165/174] block: add rq_flags to struct blk_mq_alloc_data

There's a hole here we can use, and it's faster to set this earlier rather than needing to check q->elevator multiple times. Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20211019153300.623322-2-axboe@kernel.dk Signed-off-by: Jens Axboe --- block/blk-mq.c | 20 ++++++++++---------- block/blk-mq.h | 8 ++++---- 2 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index 9840b15f505b..a4d5b779a65a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -321,25 +321,22 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct blk_mq_ctx *ctx = data->ctx; struct blk_mq_hw_ctx *hctx = data->hctx; struct request_queue *q = data->q; - struct elevator_queue *e = q->elevator; struct blk_mq_tags *tags = blk_mq_tags_from_data(data); struct request *rq = tags->static_rqs[tag]; - unsigned int rq_flags = 0; - if (e) { - rq_flags = RQF_ELV; - rq->tag = BLK_MQ_NO_TAG; - rq->internal_tag = tag; - } else { + if (!(data->rq_flags & RQF_ELV)) { rq->tag = tag; rq->internal_tag = BLK_MQ_NO_TAG; + } else { + rq->tag = BLK_MQ_NO_TAG; + rq->internal_tag = tag; } if (data->flags & BLK_MQ_REQ_PM) - rq_flags |= RQF_PM; + data->rq_flags |= RQF_PM; if (blk_queue_io_stat(q)) - rq_flags |= RQF_IO_STAT; - rq->rq_flags = rq_flags; + data->rq_flags |= RQF_IO_STAT; + rq->rq_flags = data->rq_flags; if (blk_mq_need_time_stamp(rq)) rq->start_time_ns = ktime_get_ns(); @@ -490,6 +487,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, .q = q, .flags = flags, .cmd_flags = op, + .rq_flags = q->elevator ? RQF_ELV : 0, .nr_tags = 1, }; struct request *rq; @@ -519,6 +517,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, .q = q, .flags = flags, .cmd_flags = op, + .rq_flags = q->elevator ? RQF_ELV : 0, .nr_tags = 1, }; u64 alloc_time_ns = 0; @@ -2512,6 +2511,7 @@ void blk_mq_submit_bio(struct bio *bio) .q = q, .nr_tags = 1, .cmd_flags = bio->bi_opf, + .rq_flags = q->elevator ?
RQF_ELV : 0, }; if (plug) {
diff --git a/block/blk-mq.h b/block/blk-mq.h index 08fb5922e611..28859fc5faee 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -149,6 +149,7 @@ struct blk_mq_alloc_data { blk_mq_req_flags_t flags; unsigned int shallow_depth; unsigned int cmd_flags; + unsigned int rq_flags; /* allocate multiple requests/tags in one go */ unsigned int nr_tags; @@ -166,10 +167,9 @@ static inline bool blk_mq_is_shared_tags(unsigned int flags) static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) { - if (data->q->elevator) - return data->hctx->sched_tags; - - return data->hctx->tags; + if (!(data->rq_flags & RQF_ELV)) + return data->hctx->tags; + return data->hctx->sched_tags; } static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)

From fe6134f66906dfa16d4877cab60106275f48eef7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 19 Oct 2021 09:32:58 -0600 Subject: [PATCH 166/174] block: pass in blk_mq_tags to blk_mq_rq_ctx_init()

Instead of getting this from data for every invocation of request initialization, pass it in as an argument. Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20211019153300.623322-3-axboe@kernel.dk Signed-off-by: Jens Axboe --- block/blk-mq.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index a4d5b779a65a..e881e12a2691 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -316,12 +316,11 @@ void blk_mq_wake_waiters(struct request_queue *q) } static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, - unsigned int tag, u64 alloc_time_ns) + struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns) { struct blk_mq_ctx *ctx = data->ctx; struct blk_mq_hw_ctx *hctx = data->hctx; struct request_queue *q = data->q; - struct blk_mq_tags *tags = blk_mq_tags_from_data(data); struct request *rq = tags->static_rqs[tag]; if (!(data->rq_flags & RQF_ELV)) { @@ -393,20 +392,22 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data, u64 alloc_time_ns) { unsigned int tag, tag_offset; + struct blk_mq_tags *tags; struct request *rq; - unsigned long tags; + unsigned long tag_mask; int i, nr = 0; - tags = blk_mq_get_tags(data, data->nr_tags, &tag_offset); - if (unlikely(!tags)) + tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset); + if (unlikely(!tag_mask)) return NULL; - for (i = 0; tags; i++) { - if (!(tags & (1UL << i))) + tags = blk_mq_tags_from_data(data); + for (i = 0; tag_mask; i++) { + if (!(tag_mask & (1UL << i))) continue; tag = tag_offset + i; - tags &= ~(1UL << i); - rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + tag_mask &= ~(1UL << i); + rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns); rq_list_add(data->cached_rq, rq); } data->nr_tags -= nr; @@ -477,7 +478,8 @@ retry: goto retry; } - return blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag, + alloc_time_ns); } struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, @@ -563,7 +565,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, tag = blk_mq_get_tag(&data); if (tag == BLK_MQ_NO_TAG) goto out_queue_exit; - return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns); + return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag, + alloc_time_ns); out_queue_exit: blk_queue_exit(q);

From 92aff191cc5b15a56d10a7a1a0b4bc5f6e17fcf3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 19 Oct 2021 09:32:59 -0600 Subject:
[PATCH 167/174] block: prefetch request to be initialized

Now that we have the tags available in __blk_mq_alloc_requests_batch(), we can start fetching the first request cacheline before calling into the request initialization. Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20211019153300.623322-4-axboe@kernel.dk Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/block/blk-mq.c b/block/blk-mq.c index e881e12a2691..7cdb10f96aa6 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -405,6 +405,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data, for (i = 0; tag_mask; i++) { if (!(tag_mask & (1UL << i))) continue; + prefetch(tags->static_rqs[tag]); tag = tag_offset + i; tag_mask &= ~(1UL << i); rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);

From c7b84d4226adaa601e9f73574ef123d1500cf712 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 19 Oct 2021 09:33:00 -0600 Subject: [PATCH 168/174] block: re-flow blk_mq_rq_ctx_init()

Now that we have flags passed in, we can do a final re-arrange of the flow of blk_mq_rq_ctx_init() so we're always writing the request fields in the order in which they are laid out. Signed-off-by: Jens Axboe Link: https://lore.kernel.org/r/20211019153300.623322-5-axboe@kernel.dk Signed-off-by: Jens Axboe --- block/blk-mq.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index 7cdb10f96aa6..ec966e0b172d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -323,13 +323,10 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct request_queue *q = data->q; struct request *rq = tags->static_rqs[tag]; - if (!(data->rq_flags & RQF_ELV)) { - rq->tag = tag; - rq->internal_tag = BLK_MQ_NO_TAG; - } else { - rq->tag = BLK_MQ_NO_TAG; - rq->internal_tag = tag; - } + rq->q = q; + rq->mq_ctx = ctx; + rq->mq_hctx = hctx; + rq->cmd_flags = data->cmd_flags; if (data->flags & BLK_MQ_REQ_PM) data->rq_flags |= RQF_PM; @@ -337,15 +334,19 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, data->rq_flags |= RQF_IO_STAT; rq->rq_flags = data->rq_flags; + if (!(data->rq_flags & RQF_ELV)) { + rq->tag = tag; + rq->internal_tag = BLK_MQ_NO_TAG; + } else { + rq->tag = BLK_MQ_NO_TAG; + rq->internal_tag = tag; + } + rq->timeout = 0; + if (blk_mq_need_time_stamp(rq)) rq->start_time_ns = ktime_get_ns(); else rq->start_time_ns = 0; - /* csd/requeue_work/fifo_time is initialized before use */ - rq->q = q; - rq->mq_ctx = ctx; - rq->mq_hctx = hctx; - rq->cmd_flags = data->cmd_flags; rq->rq_disk = NULL; rq->part = NULL; #ifdef CONFIG_BLK_RQ_ALLOC_TIME @@ -357,7 +358,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, #if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; #endif - rq->timeout = 0; rq->end_io = NULL; rq->end_io_data = NULL;

From 570b1cac477643cbf01a45fa5d018430a1fddbce Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Tue, 26 Oct 2021 22:40:12 +0800 Subject: [PATCH 169/174] block: Add a helper to validate the block size

There is some duplicated code to validate the block size in block drivers. This limitation actually comes from the block layer, so add a new block layer helper for that.
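To make the helper's contract concrete, its expected behaviour for a few inputs is shown below (illustration only, assuming PAGE_SIZE == 4096):

	blk_validate_block_size(512);	/* 0: smallest allowed size */
	blk_validate_block_size(4096);	/* 0: power of two, <= PAGE_SIZE */
	blk_validate_block_size(256);	/* -EINVAL: below 512 */
	blk_validate_block_size(520);	/* -EINVAL: not a power of two */
	blk_validate_block_size(8192);	/* -EINVAL: above PAGE_SIZE here */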
Signed-off-by: Xie Yongji Link: https://lore.kernel.org/r/20211026144015.188-2-xieyongji@bytedance.com Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6d95a4b36cfa..d2d627e2c782 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -44,6 +44,14 @@ struct blk_crypto_profile; */ #define BLKCG_MAX_POLS 6 +static inline int blk_validate_block_size(unsigned int bsize) +{ + if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) + return -EINVAL; + + return 0; +} + static inline bool blk_op_is_passthrough(unsigned int op) { op &= REQ_OP_MASK; From c4318d6cd038472d13e08a54a9035863c8c160bd Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Tue, 26 Oct 2021 22:40:13 +0800 Subject: [PATCH 170/174] nbd: Use blk_validate_block_size() to validate block size Use the block layer helper to validate block size instead of open coding it. Signed-off-by: Xie Yongji Link: https://lore.kernel.org/r/20211026144015.188-3-xieyongji@bytedance.com Signed-off-by: Jens Axboe --- drivers/block/nbd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 8922b8864487..504c20a2f33e 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -315,7 +315,8 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, { if (!blksize) blksize = 1u << NBD_DEF_BLKSIZE_BITS; - if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize)) + + if (blk_validate_block_size(blksize)) return -EINVAL; nbd->config->bytesize = bytesize; From af3c570fb0df422b4906ebd11c1bf363d89961d5 Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Tue, 26 Oct 2021 22:40:14 +0800 Subject: [PATCH 171/174] loop: Use blk_validate_block_size() to validate block size Remove loop_validate_block_size() and use the block layer helper to validate block size. 
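The conversions in this and the neighbouring patches all follow the same shape; a minimal hypothetical driver adopting the helper might look like this (all names invented):

	struct demo_device { struct request_queue *queue; };

	static int demo_set_block_size(struct demo_device *dev, unsigned int bsize)
	{
		int err;

		/* Rejects sizes < 512, > PAGE_SIZE, or not a power of two */
		err = blk_validate_block_size(bsize);
		if (err)
			return err;

		blk_queue_logical_block_size(dev->queue, bsize);
		return 0;
	}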
Signed-off-by: Xie Yongji Link: https://lore.kernel.org/r/20211026144015.188-4-xieyongji@bytedance.com Signed-off-by: Jens Axboe --- drivers/block/loop.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 980b538c008a..ab23bbc467e7 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -272,19 +272,6 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) blk_mq_unfreeze_queue(lo->lo_queue); } -/** - * loop_validate_block_size() - validates the passed in block size - * @bsize: size to validate - */ -static int -loop_validate_block_size(unsigned short bsize) -{ - if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) - return -EINVAL; - - return 0; -} - /** * loop_set_size() - sets device size and notifies userspace * @lo: struct loop_device to set the size for @@ -1236,7 +1223,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, } if (config->block_size) { - error = loop_validate_block_size(config->block_size); + error = blk_validate_block_size(config->block_size); if (error) goto out_unlock; } @@ -1752,7 +1739,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) if (lo->lo_state != Lo_bound) return -ENXIO; - err = loop_validate_block_size(arg); + err = blk_validate_block_size(arg); if (err) return err;

From 57a13a5b8157d9a8606490aaa1b805bafe6c37e1 Mon Sep 17 00:00:00 2001 From: Xie Yongji Date: Tue, 26 Oct 2021 22:40:15 +0800 Subject: [PATCH 172/174] virtio-blk: Use blk_validate_block_size() to validate block size

The block layer can't support a block size larger than the page size yet, and a block size that's too small or not a power of two won't work either. If a misconfigured device presents an invalid block size in configuration space, it will result in a kernel crash like the one below: [ 506.154324] BUG: kernel NULL pointer dereference, address: 0000000000000008 [ 506.160416] RIP: 0010:create_empty_buffers+0x24/0x100 [ 506.174302] Call Trace: [ 506.174651] create_page_buffers+0x4d/0x60 [ 506.175207] block_read_full_page+0x50/0x380 [ 506.175798] ? __mod_lruvec_page_state+0x60/0xa0 [ 506.176412] ? __add_to_page_cache_locked+0x1b2/0x390 [ 506.177085] ? blkdev_direct_IO+0x4a0/0x4a0 [ 506.177644] ? scan_shadow_nodes+0x30/0x30 [ 506.178206] ? lru_cache_add+0x42/0x60 [ 506.178716] do_read_cache_page+0x695/0x740 [ 506.179278] ? read_part_sector+0xe0/0xe0 [ 506.179821] read_part_sector+0x36/0xe0 [ 506.180337] adfspart_check_ICS+0x32/0x320 [ 506.180890] ? snprintf+0x45/0x70 [ 506.181350] ? read_part_sector+0xe0/0xe0 [ 506.181906] bdev_disk_changed+0x229/0x5c0 [ 506.182483] blkdev_get_whole+0x6d/0x90 [ 506.183013] blkdev_get_by_dev+0x122/0x2d0 [ 506.183562] device_add_disk+0x39e/0x3c0 [ 506.184472] virtblk_probe+0x3f8/0x79b [virtio_blk] [ 506.185461] virtio_dev_probe+0x15e/0x1d0 [virtio] So let's use a block layer helper to validate the block size. Signed-off-by: Xie Yongji Acked-by: Michael S.
Tsirkin Link: https://lore.kernel.org/r/20211026144015.188-5-xieyongji@bytedance.com Signed-off-by: Jens Axboe --- drivers/block/virtio_blk.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 303caf2d17d0..fd086179f980 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -815,9 +815,17 @@ static int virtblk_probe(struct virtio_device *vdev) err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, struct virtio_blk_config, blk_size, &blk_size); - if (!err) + if (!err) { + err = blk_validate_block_size(blk_size); + if (err) { + dev_err(&vdev->dev, + "virtio_blk: invalid block size: 0x%x\n", + blk_size); + goto out_cleanup_disk; + } + blk_queue_logical_block_size(q, blk_size); - else + } else blk_size = queue_logical_block_size(q); /* Use topology information if available */

From 02f7eab0095a47b45f48a4321d33de3569c59061 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 28 Oct 2021 12:08:34 -0600 Subject: [PATCH 173/174] block: improve readability of blk_mq_end_request_batch()

It's faster and easier to read if we tolerate cur_hctx being NULL in the "when to flush" condition. Rename last_hctx to cur_hctx while at it, as it better describes the role of that variable. Signed-off-by: Jens Axboe --- block/blk-mq.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index ec966e0b172d..221d1b7d10d6 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -822,7 +822,7 @@ static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, void blk_mq_end_request_batch(struct io_comp_batch *iob) { int tags[TAG_COMP_BATCH], nr_tags = 0; - struct blk_mq_hw_ctx *last_hctx = NULL; + struct blk_mq_hw_ctx *cur_hctx = NULL; struct request *rq; u64 now = 0; @@ -845,17 +845,17 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob) blk_pm_mark_last_busy(rq); rq_qos_done(rq->q, rq); - if (nr_tags == TAG_COMP_BATCH || - (last_hctx && last_hctx != rq->mq_hctx)) { - blk_mq_flush_tag_batch(last_hctx, tags, nr_tags); + if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) { + if (cur_hctx) + blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags); nr_tags = 0; + cur_hctx = rq->mq_hctx; } tags[nr_tags++] = rq->tag; - last_hctx = rq->mq_hctx; } if (nr_tags) - blk_mq_flush_tag_batch(last_hctx, tags, nr_tags); + blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags); } EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

From 9b84c629c90374498ab5825dede74a06ea1c775b Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 29 Oct 2021 16:40:23 +0800 Subject: [PATCH 174/174] blk-mq-debugfs: Show active requests per queue for shared tags

Currently we show the hctx.active value for the per-hctx "active" file. However this is not maintained for shared tags, and we instead keep a record of the number of active requests per request queue - see commit f1b49fdc1c64 ("blk-mq: Record active_queues_shared_sbitmap per tag_set for when using shared sbitmap"). For the case of shared tags, change to showing the active requests per request queue, using the __blk_mq_active_requests() helper.
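For reference, __blk_mq_active_requests() picks the per-queue counter when tags are shared across the tag set and the per-hctx counter otherwise; a rough sketch of its logic (the shared-tags field name is reconstructed from context and is an assumption):

	static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
	{
		if (blk_mq_is_shared_tags(hctx->flags))
			return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
		return atomic_read(&hctx->nr_active);
	}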
Signed-off-by: John Garry Link: https://lore.kernel.org/r/1635496823-33515-1-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 68ca5d21cda7..0f8c60e9c719 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -550,7 +550,7 @@ static int hctx_active_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; - seq_printf(m, "%d\n", atomic_read(&hctx->nr_active)); + seq_printf(m, "%d\n", __blk_mq_active_requests(hctx)); return 0; }
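With this change, reading the per-hctx "active" attribute in debugfs reports a meaningful count for shared-tags devices as well. A hypothetical session (device name and output invented for illustration):

	$ cat /sys/kernel/debug/block/sda/hctx0/active
	3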