zram: take device and not only bvec offset into account

commit 95848dcb9d upstream.

Commit af8b04c637 ("zram: simplify bvec iteration in
__zram_make_request") changed the bio iteration in zram to rely on the
implicit capping to page boundaries in bio_for_each_segment.  But it
failed to account for the fact that zram cares not only about the page
alignment of the bio payload, but also about the page alignment into
the device.  For buffered I/O and swap the two are the same, but for
direct I/O or kernel-internal I/O such as XFS log buffer writes they
can differ.
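
For illustration (the numbers are hypothetical, not from the patch):
with 4 KiB pages the same page-sized, page-aligned payload can sit at
different alignments in the device depending on the starting sector, as
in this user-space sketch:

/* hypothetical example of device-side alignment, plain user-space C */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT              12
#define PAGE_SIZE               (1u << PAGE_SHIFT)
#define SECTOR_SHIFT            9
#define SECTORS_PER_PAGE_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE        (1u << SECTORS_PER_PAGE_SHIFT)

int main(void)
{
        /* sector 8 = byte 4096 (page aligned), sector 9 = byte 4608 */
        uint64_t sectors[] = { 8, 9 };

        for (int i = 0; i < 2; i++) {
                uint32_t index = sectors[i] >> SECTORS_PER_PAGE_SHIFT;
                uint32_t offset = (sectors[i] & (SECTORS_PER_PAGE - 1))
                                        << SECTOR_SHIFT;

                /*
                 * A 4 KiB segment starting here ends at offset + 4095,
                 * so it spills into device page index + 1 whenever
                 * offset != 0, even though the payload never crosses a
                 * page boundary in memory.
                 */
                printf("sector %llu -> page %u, offset %u\n",
                       (unsigned long long)sectors[i], index, offset);
        }
        return 0;
}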

Fix this by open coding bio_for_each_segment and limiting the bvec
length so that it never crosses a page alignment boundary in the
device, in addition to the payload boundary already taken care of by
bio_iter_iovec.
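
A minimal user-space model of that iteration (walk_request() and its
constants are illustrative stand-ins, not kernel helpers; the real loop
is in the hunks below):

/* split a request into chunks that never cross a device page boundary */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE               4096u
#define SECTOR_SHIFT            9
#define SECTORS_PER_PAGE_SHIFT  3
#define SECTORS_PER_PAGE        8u

static void walk_request(uint64_t sector, uint32_t bytes)
{
        do {
                uint32_t index = sector >> SECTORS_PER_PAGE_SHIFT;
                uint32_t offset = (sector & (SECTORS_PER_PAGE - 1))
                                        << SECTOR_SHIFT;
                /* same clamp as bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset) */
                uint32_t len = bytes < PAGE_SIZE - offset ?
                                        bytes : PAGE_SIZE - offset;

                printf("page %u, offset %u, len %u\n", index, offset, len);

                /* corresponds to bio_advance_iter_single() */
                sector += len >> SECTOR_SHIFT;
                bytes -= len;
        } while (bytes);
}

int main(void)
{
        /* an 8 KiB write starting 512 bytes into a device page: 3 chunks */
        walk_request(9, 8192);
        return 0;
}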

Cc: stable@vger.kernel.org
Fixes: af8b04c637 ("zram: simplify bvec iteration in __zram_make_request")
Reported-by: Dusty Mabe <dusty@dustymabe.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Link: https://lore.kernel.org/r/20230805055537.147835-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0f70a470e2
commit 729881aeec
1 changed file with 20 additions and 12 deletions

@@ -1870,15 +1870,16 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
 
 static void zram_bio_read(struct zram *zram, struct bio *bio)
 {
-        struct bvec_iter iter;
-        struct bio_vec bv;
-        unsigned long start_time;
+        unsigned long start_time = bio_start_io_acct(bio);
+        struct bvec_iter iter = bio->bi_iter;
 
-        start_time = bio_start_io_acct(bio);
-        bio_for_each_segment(bv, bio, iter) {
+        do {
                 u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
                 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
                                 SECTOR_SHIFT;
+                struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+                bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
 
                 if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
                         atomic64_inc(&zram->stats.failed_reads);
@@ -1890,22 +1891,26 @@ static void zram_bio_read(struct zram *zram, struct bio *bio)
                 zram_slot_lock(zram, index);
                 zram_accessed(zram, index);
                 zram_slot_unlock(zram, index);
-        }
+
+                bio_advance_iter_single(bio, &iter, bv.bv_len);
+        } while (iter.bi_size);
 
         bio_end_io_acct(bio, start_time);
         bio_endio(bio);
 }
 
 static void zram_bio_write(struct zram *zram, struct bio *bio)
 {
-        struct bvec_iter iter;
-        struct bio_vec bv;
-        unsigned long start_time;
+        unsigned long start_time = bio_start_io_acct(bio);
+        struct bvec_iter iter = bio->bi_iter;
 
-        start_time = bio_start_io_acct(bio);
-        bio_for_each_segment(bv, bio, iter) {
+        do {
                 u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
                 u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
                                 SECTOR_SHIFT;
+                struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+                bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
 
                 if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
                         atomic64_inc(&zram->stats.failed_writes);
@@ -1916,7 +1921,10 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
                 zram_slot_lock(zram, index);
                 zram_accessed(zram, index);
                 zram_slot_unlock(zram, index);
-        }
+
+                bio_advance_iter_single(bio, &iter, bv.bv_len);
+        } while (iter.bi_size);
 
         bio_end_io_acct(bio, start_time);
         bio_endio(bio);
 }