block: use bdev_is_zoned instead of open coding it

Use bdev_is_zoned in all places where a block_device is available instead
of open coding it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20220706070350.1703384-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Christoph Hellwig, 2022-07-06 09:03:37 +02:00
Committer: Jens Axboe
Parent:    6cc37a672a
Commit:    edd1dbc83b
7 changed files with 12 additions and 13 deletions

File: block/bio.c

@@ -1033,7 +1033,7 @@ int bio_add_zone_append_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
 		return 0;
-	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+	if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
 		return 0;
 	return bio_add_hw_page(q, bio, page, len, offset,

File: block/blk-core.c

@@ -569,7 +569,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 	int nr_sectors = bio_sectors(bio);

 	/* Only applicable to zoned block devices */
-	if (!blk_queue_is_zoned(q))
+	if (!bdev_is_zoned(bio->bi_bdev))
 		return BLK_STS_NOTSUPP;

 	/* The bio sector must point to the start of a sequential zone */
@@ -775,11 +775,11 @@ void submit_bio_noacct(struct bio *bio)
 	case REQ_OP_ZONE_OPEN:
 	case REQ_OP_ZONE_CLOSE:
 	case REQ_OP_ZONE_FINISH:
-		if (!blk_queue_is_zoned(q))
+		if (!bdev_is_zoned(bio->bi_bdev))
 			goto not_supported;
 		break;
 	case REQ_OP_ZONE_RESET_ALL:
-		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
+		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
 			goto not_supported;
 		break;
 	case REQ_OP_WRITE_ZEROES:

File: block/blk-mq.h

@@ -317,7 +317,7 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
 	 * For regular block devices or read operations, use the context plug
 	 * which may be NULL if blk_start_plug() was not executed.
 	 */
-	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
+	if (!bdev_is_zoned(bio->bi_bdev) || !op_is_write(bio_op(bio)))
 		return current->plug;

 	/* Zoned block device write operation case: do not plug the BIO */

File: block/blk-zoned.c

@@ -149,8 +149,7 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
 	struct gendisk *disk = bdev->bd_disk;
 	sector_t capacity = get_capacity(disk);

-	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
-	    WARN_ON_ONCE(!disk->fops->report_zones))
+	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
 		return -EOPNOTSUPP;

 	if (!nr_zones || sector >= capacity)
@@ -268,7 +267,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
 	struct bio *bio = NULL;
 	int ret = 0;

-	if (!blk_queue_is_zoned(q))
+	if (!bdev_is_zoned(bdev))
 		return -EOPNOTSUPP;

 	if (bdev_read_only(bdev))
@@ -350,7 +349,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 	if (!q)
 		return -ENXIO;

-	if (!blk_queue_is_zoned(q))
+	if (!bdev_is_zoned(bdev))
 		return -ENOTTY;

 	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
@@ -408,7 +407,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	if (!q)
 		return -ENXIO;

-	if (!blk_queue_is_zoned(q))
+	if (!bdev_is_zoned(bdev))
 		return -ENOTTY;

 	if (!(mode & FMODE_WRITE))

File: drivers/md/dm-table.c

@@ -1623,7 +1623,7 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 	unsigned int *zone_sectors = data;

-	if (!blk_queue_is_zoned(q))
+	if (!bdev_is_zoned(dev->bdev))
 		return 0;

 	return blk_queue_zone_sectors(q) != *zone_sectors;

File: drivers/md/dm-zone.c

@@ -270,7 +270,7 @@ static int device_not_zone_append_capable(struct dm_target *ti,
 					  struct dm_dev *dev, sector_t start,
 					  sector_t len, void *data)
 {
-	return !blk_queue_is_zoned(bdev_get_queue(dev->bdev));
+	return !bdev_is_zoned(dev->bdev);
 }

 static bool dm_table_supports_zone_append(struct dm_table *t)

File: drivers/md/dm.c

@@ -1033,7 +1033,7 @@ static void clone_endio(struct bio *bio)
 	}

 	if (static_branch_unlikely(&zoned_enabled) &&
-	    unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+	    unlikely(bdev_is_zoned(bio->bi_bdev)))
 		dm_zone_endio(io, bio);

 	if (endio) {