block: add a bdev_nonrot helper

Add a helper to check the nonrot flag based on the block_device instead
of having to poke into the block layer's internal request_queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: David Sterba <dsterba@suse.com> [btrfs]
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220415045258.199825-12-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Christoph Hellwig, 2022-04-15 06:52:42 +02:00; committed by Jens Axboe
parent 9964e67455
commit 10f0d2a517
13 changed files with 19 additions and 18 deletions
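
The per-call-site change is mechanical, as the diffs below show. For
orientation, a minimal sketch of the before/after pattern; the wrapper
function here is hypothetical and only illustrates the helper this patch
adds to include/linux/blkdev.h:

    #include <linux/blkdev.h>

    /* Hypothetical example caller, not part of the patch. */
    static bool example_is_rotational(struct block_device *bdev)
    {
        /* Before: poke into the request_queue behind the bdev. */
        /* return !blk_queue_nonrot(bdev_get_queue(bdev)); */

        /* After: query the block_device directly. */
        return !bdev_nonrot(bdev);
    }

Either form returns the same answer; the helper simply hides the
bdev_get_queue() dereference so callers need not touch the request_queue
at all.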

block/ioctl.c

@@ -489,7 +489,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
                     queue_max_sectors(bdev_get_queue(bdev)));
         return put_ushort(argp, max_sectors);
     case BLKROTATIONAL:
-        return put_ushort(argp, !blk_queue_nonrot(bdev_get_queue(bdev)));
+        return put_ushort(argp, !bdev_nonrot(bdev));
     case BLKRASET:
     case BLKFRASET:
         if(!capable(CAP_SYS_ADMIN))

drivers/block/loop.c

@@ -903,7 +903,7 @@ static void loop_update_rotational(struct loop_device *lo)
 
     /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
     if (file_bdev)
-        nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
+        nonrot = bdev_nonrot(file_bdev);
 
     if (nonrot)
         blk_queue_flag_set(QUEUE_FLAG_NONROT, q);

drivers/md/dm-table.c

@@ -1820,9 +1820,7 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
                                 sector_t start, sector_t len, void *data)
 {
-    struct request_queue *q = bdev_get_queue(dev->bdev);
-
-    return !blk_queue_nonrot(q);
+    return !bdev_nonrot(dev->bdev);
 }
 
 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,

drivers/md/md.c

@@ -5991,8 +5991,7 @@ int md_run(struct mddev *mddev)
         bool nonrot = true;
 
         rdev_for_each(rdev, mddev) {
-            if (rdev->raid_disk >= 0 &&
-                !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+            if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
                 nonrot = false;
                 break;
             }

drivers/md/raid1.c

@@ -708,7 +708,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
         /* At least two disks to choose from so failfast is OK */
         set_bit(R1BIO_FailFast, &r1_bio->state);
 
-        nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+        nonrot = bdev_nonrot(rdev->bdev);
         has_nonrot_disk |= nonrot;
         pending = atomic_read(&rdev->nr_pending);
         dist = abs(this_sector - conf->mirrors[disk].head_position);

drivers/md/raid10.c

@@ -803,7 +803,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
         if (!do_balance)
             break;
 
-        nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+        nonrot = bdev_nonrot(rdev->bdev);
         has_nonrot_disk |= nonrot;
         pending = atomic_read(&rdev->nr_pending);
         if (min_pending > pending && nonrot) {

drivers/md/raid5.c

@@ -7242,7 +7242,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
     rdev_for_each(rdev, mddev) {
         if (test_bit(Journal, &rdev->flags))
             continue;
-        if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
+        if (bdev_nonrot(rdev->bdev)) {
             conf->batch_bio_dispatch = false;
             break;
         }

drivers/target/target_core_file.c

@@ -135,7 +135,6 @@ static int fd_configure_device(struct se_device *dev)
     inode = file->f_mapping->host;
     if (S_ISBLK(inode->i_mode)) {
         struct block_device *bdev = I_BDEV(inode);
-        struct request_queue *q = bdev_get_queue(bdev);
         unsigned long long dev_size;
 
         fd_dev->fd_block_size = bdev_logical_block_size(bdev);
@@ -160,7 +159,7 @@ static int fd_configure_device(struct se_device *dev)
          */
         dev->dev_attrib.max_write_same_len = 0xFFFF;
 
-        if (blk_queue_nonrot(q))
+        if (bdev_nonrot(bdev))
             dev->dev_attrib.is_nonrot = 1;
     } else {
         if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {

drivers/target/target_core_iblock.c

@@ -133,7 +133,7 @@ static int iblock_configure_device(struct se_device *dev)
     else
         dev->dev_attrib.max_write_same_len = 0xFFFF;
 
-    if (blk_queue_nonrot(q))
+    if (bdev_nonrot(bd))
         dev->dev_attrib.is_nonrot = 1;
 
     bi = bdev_get_integrity(bd);

fs/btrfs/volumes.c

@@ -642,7 +642,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
     }
 
-    if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+    if (!bdev_nonrot(bdev))
         fs_devices->rotating = true;
 
     device->bdev = bdev;
@@ -2705,7 +2705,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
 
     atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
 
-    if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+    if (!bdev_nonrot(bdev))
         fs_devices->rotating = true;
 
     orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);

fs/ext4/mballoc.c

@@ -3498,7 +3498,7 @@ int ext4_mb_init(struct super_block *sb)
         spin_lock_init(&lg->lg_prealloc_lock);
     }
 
-    if (blk_queue_nonrot(bdev_get_queue(sb->s_bdev)))
+    if (bdev_nonrot(sb->s_bdev))
         sbi->s_mb_max_linear_groups = 0;
     else
         sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;

include/linux/blkdev.h

@@ -1326,6 +1326,11 @@ static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
     return 0;
 }
 
+static inline bool bdev_nonrot(struct block_device *bdev)
+{
+    return blk_queue_nonrot(bdev_get_queue(bdev));
+}
+
 static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
 {
     struct request_queue *q = bdev_get_queue(bdev);

mm/swapfile.c

@@ -2466,7 +2466,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
     if (p->flags & SWP_CONTINUED)
         free_swap_count_continuations(p);
 
-    if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
+    if (!p->bdev || !bdev_nonrot(p->bdev))
         atomic_dec(&nr_rotate_swap);
 
     mutex_lock(&swapon_mutex);
@@ -3071,7 +3071,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
     if (p->bdev && p->bdev->bd_disk->fops->rw_page)
         p->flags |= SWP_SYNCHRONOUS_IO;
 
-    if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+    if (p->bdev && bdev_nonrot(p->bdev)) {
         int cpu;
         unsigned long ci, nr_cluster;