block: add bio_truncate to fix guard_bio_eod
Some filesystems, such as vfat, may send a bio that crosses the device
boundary, and worse, an IO request starting within device boundaries can
contain more than one segment past EOD.

Commit dce30ca9e3 ("fs: fix guard_bio_eod to check for real EOD errors")
tried to fix this issue by returning -EIO for this situation. However,
that approach gives fs user code no chance to handle the -EIO, so
sync_inodes_sb() may hang forever.

Also, the current truncation of the last segment is dangerous because it
updates the last bvec: the bvec table is no longer immutable, and fs bio
users may not retrieve the truncated pages via bio_for_each_segment_all()
in their .end_io callbacks.

Fix this issue by supporting multi-segment truncation. The approach is
simpler:

- just update the bio size, since the block layer can build correct bvecs
  from the updated bio size; the bvec table then becomes really immutable.

- zero all truncated segments for read bios.

Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: linux-fsdevel@vger.kernel.org
Fixed-by: dce30ca9e3 ("fs: fix guard_bio_eod to check for real EOD errors")
Reported-by: syzbot+2b9e54155c8c25d8d165@syzkaller.appspotmail.com
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b2c0fcd287
commit 85a8ce62c2

3 changed files with 41 additions and 24 deletions
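
As a rough illustration of the EOD arithmetic the commit message refers to, the following stand-alone user-space sketch (not kernel code; the device size, start sector, and byte counts are invented for the example) shows when a request straddles the device end and what size it should be clamped to:

/*
 * Toy model of the EOD check: a request that starts inside the device
 * but whose byte size runs past the last sector must be clamped to
 * (remaining sectors << 9) rather than rejected outright.
 */
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
        unsigned long long dev_sectors = 20480;   /* 10 MiB device (invented)  */
        unsigned long long start_sector = 20472;  /* bio starts inside device  */
        unsigned int bio_bytes = 16384;           /* 4 x 4 KiB segments        */

        unsigned long long remaining = dev_sectors - start_sector; /* 8 sectors */

        if ((bio_bytes >> SECTOR_SHIFT) > remaining) {
                unsigned int clamped = remaining << SECTOR_SHIFT;

                /* 16384 bytes requested, only 4096 fit: truncate, don't -EIO */
                printf("bio straddles EOD: %u -> %u bytes\n", bio_bytes, clamped);
        } else {
                printf("bio fits: %u bytes\n", bio_bytes);
        }
        return 0;
}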
block/bio.c | 39

@@ -538,6 +538,45 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 }
 EXPORT_SYMBOL(zero_fill_bio_iter);
 
+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+        struct bio_vec bv;
+        struct bvec_iter iter;
+        unsigned int done = 0;
+        bool truncated = false;
+
+        if (new_size >= bio->bi_iter.bi_size)
+                return;
+
+        if (bio_data_dir(bio) != READ)
+                goto exit;
+
+        bio_for_each_segment(bv, bio, iter) {
+                if (done + bv.bv_len > new_size) {
+                        unsigned offset;
+
+                        if (!truncated)
+                                offset = new_size - done;
+                        else
+                                offset = 0;
+                        zero_user(bv.bv_page, offset, bv.bv_len - offset);
+                        truncated = true;
+                }
+                done += bv.bv_len;
+        }
+
+ exit:
+        /*
+         * Don't touch bvec table here and make it really immutable, since
+         * fs bio user has to retrieve all pages via bio_for_each_segment_all
+         * in its .end_bio() callback.
+         *
+         * It is enough to truncate bio by updating .bi_size since we can make
+         * correct bvec with the updated .bi_size for drivers.
+         */
+        bio->bi_iter.bi_size = new_size;
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
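
To see what the new helper's loop does to the data, here is a stand-alone user-space model of the same truncation logic (a toy struct seg array stands in for the bvec table, the READ-only check is omitted, and all sizes are invented for the example):

#include <stdio.h>
#include <string.h>

struct seg {
        unsigned char data[4096];
        unsigned int len;
};

static void toy_truncate(struct seg *segs, int nr, unsigned int *total,
                         unsigned int new_size)
{
        unsigned int done = 0;
        int truncated = 0;
        int i;

        if (new_size >= *total)
                return;

        for (i = 0; i < nr; i++) {
                if (done + segs[i].len > new_size) {
                        /* first clipped segment keeps its head, later ones are zeroed fully */
                        unsigned int offset = truncated ? 0 : new_size - done;

                        memset(segs[i].data + offset, 0, segs[i].len - offset);
                        truncated = 1;
                }
                done += segs[i].len;
        }

        /* as in bio_truncate(): only the recorded size shrinks, the segments stay put */
        *total = new_size;
}

int main(void)
{
        struct seg segs[3];
        unsigned int total = 3 * 4096;
        int i;

        for (i = 0; i < 3; i++) {
                memset(segs[i].data, 0xaa, sizeof(segs[i].data));
                segs[i].len = 4096;
        }

        /* keep 6144 bytes: segment 1 is zeroed from offset 2048, segment 2 entirely */
        toy_truncate(segs, 3, &total, 6144);
        printf("total=%u seg1[2047]=%02x seg1[2048]=%02x seg2[0]=%02x\n",
               total, segs[1].data[2047], segs[1].data[2048], segs[2].data[0]);
        return 0;
}

Running it prints total=6144 with segment 1 zeroed from offset 2048 onward and segment 2 zeroed completely, which matches the offset computation in bio_truncate().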
fs/buffer.c | 25

@@ -3034,8 +3034,6 @@ static void end_bio_bh_io_sync(struct bio *bio)
 void guard_bio_eod(int op, struct bio *bio)
 {
         sector_t maxsector;
-        struct bio_vec *bvec = bio_last_bvec_all(bio);
-        unsigned truncated_bytes;
         struct hd_struct *part;
 
         rcu_read_lock();
@@ -3061,28 +3059,7 @@ void guard_bio_eod(int op, struct bio *bio)
         if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
                 return;
 
-        /* Uhhuh. We've got a bio that straddles the device size! */
-        truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
-
-        /*
-         * The bio contains more than one segment which spans EOD, just return
-         * and let IO layer turn it into an EIO
-         */
-        if (truncated_bytes > bvec->bv_len)
-                return;
-
-        /* Truncate the bio.. */
-        bio->bi_iter.bi_size -= truncated_bytes;
-        bvec->bv_len -= truncated_bytes;
-
-        /* ..and clear the end of the buffer for reads */
-        if (op == REQ_OP_READ) {
-                struct bio_vec bv;
-
-                mp_bvec_last_segment(bvec, &bv);
-                zero_user(bv.bv_page, bv.bv_offset + bv.bv_len,
-                                truncated_bytes);
-        }
+        bio_truncate(bio, maxsector << 9);
 }
 
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
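
The behavioral change in guard_bio_eod() can be summarized with a small user-space sketch (invented segment sizes; not kernel code): the old path refused to truncate whenever the overrun exceeded the last segment, so the block layer failed the whole bio with -EIO, while the new path always clamps via bio_truncate().

#include <stdio.h>

int main(void)
{
        unsigned int last_seg_len = 4096;   /* length of the final segment     */
        unsigned int overrun = 12288;       /* bytes past EOD: three segments  */

        if (overrun > last_seg_len)
                printf("old guard_bio_eod: cannot truncate, bio fails with -EIO\n");
        else
                printf("old guard_bio_eod: shrink last bvec by %u bytes\n", overrun);

        printf("new guard_bio_eod: bio_truncate() clamps the bio regardless\n");
        return 0;
}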
include/linux/bio.h | 1

@@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
                                               gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
 
 static inline void zero_fill_bio(struct bio *bio)
 {