block: make dma_alignment a stacking queue_limit

[ Upstream commit c964d62f5c ]

Device mappers had always been getting the default 511 dma mask, but
the underlying device might have a larger alignment requirement. Since
this value is used to determine allowable direct-io alignment, this
needs to be a stackable limit.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20221110184501.2451620-2-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: 86e4d3e8d1 ("dm-crypt: provide dma_alignment limit in io_hints")
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 207edad571
commit 7bcb1a45b8
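The heart of this change is the stacking rule added to blk_stack_limits() in the diff below: when limits are stacked, the top queue takes the larger (and therefore stricter) of the two alignment masks. The following userspace sketch is purely illustrative and not part of the patch; toy_limits and toy_stack_limits are made-up stand-ins for struct queue_limits and blk_stack_limits().

#include <stdio.h>

/*
 * Toy stand-in for the relevant part of struct queue_limits; the real
 * structure lives in include/linux/blkdev.h and carries many more fields.
 */
struct toy_limits {
	unsigned int dma_alignment;	/* alignment mask, e.g. 511 = 512-byte alignment */
};

/*
 * Mirrors the stacking rule this patch adds to blk_stack_limits(): the
 * stacked (top) device must honour the strictest alignment of any device
 * below it, and for (2^n - 1) masks that is simply the larger mask.
 */
static void toy_stack_limits(struct toy_limits *t, const struct toy_limits *b)
{
	t->dma_alignment = t->dma_alignment > b->dma_alignment ?
			   t->dma_alignment : b->dma_alignment;
}

int main(void)
{
	struct toy_limits dm   = { .dma_alignment = 511 };	/* old default mask */
	struct toy_limits nvme = { .dma_alignment = 4095 };	/* 4 KiB alignment */

	toy_stack_limits(&dm, &nvme);
	printf("stacked dma_alignment mask: %u\n", dm.dma_alignment);	/* prints 4095 */
	return 0;
}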
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -426,7 +426,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
 				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 		goto fail_stats;
 
-	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
 
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -57,6 +57,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->misaligned = 0;
 	lim->zoned = BLK_ZONED_NONE;
 	lim->zone_write_granularity = 0;
+	lim->dma_alignment = 511;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -600,6 +601,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
 
 	/* Set non-power-of-2 compatible chunk_sectors boundary */
 	if (b->chunk_sectors)
@@ -773,7 +775,7 @@ EXPORT_SYMBOL(blk_queue_virt_boundary);
  **/
 void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
-	q->dma_alignment = mask;
+	q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
@@ -795,8 +797,8 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 {
 	BUG_ON(mask > PAGE_SIZE);
 
-	if (mask > q->dma_alignment)
-		q->dma_alignment = mask;
+	if (mask > q->limits.dma_alignment)
+		q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -311,6 +311,13 @@ struct queue_limits {
 	unsigned char		discard_misaligned;
 	unsigned char		raid_partial_stripes_expensive;
 	enum blk_zoned_model	zoned;
+
+	/*
+	 * Drivers that set dma_alignment to less than 511 must be prepared to
+	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+	 * due to possible offsets.
+	 */
+	unsigned int		dma_alignment;
 };
 
 typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -456,12 +463,6 @@ struct request_queue {
 	unsigned long		nr_requests;	/* Max # of requests */
 
 	unsigned int		dma_pad_mask;
-	/*
-	 * Drivers that set dma_alignment to less than 511 must be prepared to
-	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
-	 * due to possible offsets.
-	 */
-	unsigned int		dma_alignment;
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct blk_crypto_profile *crypto_profile;
@@ -1311,7 +1312,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
 
 static inline int queue_dma_alignment(const struct request_queue *q)
 {
-	return q ? q->dma_alignment : 511;
+	return q ? q->limits.dma_alignment : 511;
 }
 
 static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
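As the commit message notes, this mask is what the block layer consults when deciding whether a direct-IO request is acceptably aligned (see queue_dma_alignment() above). The snippet below is a rough userspace illustration of that kind of mask test, assuming both the buffer address and the length are checked against the mask; dio_addr_len_aligned is a hypothetical name, not a kernel helper, and the snippet is not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative check in the spirit of the block layer's direct-IO
 * alignment test: a user buffer and length pass only if neither has
 * bits set inside the queue's dma_alignment mask.
 */
static bool dio_addr_len_aligned(uintptr_t addr, size_t len, unsigned int mask)
{
	return ((addr | len) & mask) == 0;
}

int main(void)
{
	unsigned int mask = 511;	/* default 512-byte alignment mask */

	printf("%d\n", dio_addr_len_aligned(0x1000, 4096, mask));	/* 1: aligned */
	printf("%d\n", dio_addr_len_aligned(0x1003, 4096, mask));	/* 0: misaligned address */
	return 0;
}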