Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge tag 'for-5.19/dm-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix DM era to commit metadata during suspend using drain_workqueue
   instead of flush_workqueue.

 - Fix DM core's dm_io_complete to not return early if io error is
   BLK_STS_AGAIN but bio polling is not in use.

 - Fix DM core's dm_io_complete BLK_STS_DM_REQUEUE handling when dm_io
   represents a split bio.

 - Fix recent DM mirror log regression by clearing bits up to
   BITS_PER_LONG boundary.

* tag 'for-5.19/dm-fixes-4' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm mirror log: clear log bits up to BITS_PER_LONG boundary
  dm: fix BLK_STS_DM_REQUEUE handling when dm_io represents split bio
  dm: do not return early from dm_io_complete if BLK_STS_AGAIN without polling
  dm era: commit metadata in postsuspend after worker stops
commit cbe232ab07
4 changed files with 19 additions and 7 deletions
@@ -272,6 +272,7 @@ struct dm_io {
         atomic_t io_count;
         struct mapped_device *md;

+        struct bio *split_bio;
         /* The three fields represent mapped part of original bio */
         struct bio *orig_bio;
         unsigned int sector_offset; /* offset to end of orig_bio */
@@ -1400,7 +1400,7 @@ static void start_worker(struct era *era)
 static void stop_worker(struct era *era)
 {
         atomic_set(&era->suspended, 1);
-        flush_workqueue(era->wq);
+        drain_workqueue(era->wq);
 }

 /*----------------------------------------------------------------
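Why drain_workqueue() here: flush_workqueue() only waits for the work items that are on the queue when it is called, so a work item that requeues itself (as the dm-era worker can) may still be pending when the flush returns, while drain_workqueue() keeps flushing until the queue is actually empty. A minimal, self-contained sketch of the difference follows; it is not the dm-era code, and the demo_* names and the three-pass counter are made up for illustration:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>

static struct workqueue_struct *demo_wq;
static atomic_t demo_passes = ATOMIC_INIT(3);

static void demo_fn(struct work_struct *w);
static DECLARE_WORK(demo_work, demo_fn);

static void demo_fn(struct work_struct *w)
{
        /* Pretend there is more to do: requeue ourselves a couple of times. */
        if (atomic_dec_return(&demo_passes) > 0)
                queue_work(demo_wq, &demo_work);
}

static int __init demo_init(void)
{
        demo_wq = alloc_workqueue("demo_wq", 0, 0);
        if (!demo_wq)
                return -ENOMEM;

        queue_work(demo_wq, &demo_work);

        /*
         * flush_workqueue(demo_wq) would only wait for the pass queued above;
         * the self-requeued passes could still run afterwards.
         * drain_workqueue() re-flushes until nothing is left on the queue.
         */
        drain_workqueue(demo_wq);

        return 0;
}

static void __exit demo_exit(void)
{
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With only flush_workqueue(), the suspend path could presumably go on to commit metadata while a self-requeued pass of the worker was still outstanding; draining first is what lets era_postsuspend() commit safely, as the next hunk does.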
@@ -1570,6 +1570,12 @@ static void era_postsuspend(struct dm_target *ti)
         }

         stop_worker(era);
+
+        r = metadata_commit(era->md);
+        if (r) {
+                DMERR("%s: metadata_commit failed", __func__);
+                /* FIXME: fail mode */
+        }
 }

 static int era_preresume(struct dm_target *ti)
@@ -615,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log)
                 log_clear_bit(lc, lc->clean_bits, i);

         /* clear any old bits -- device has shrunk */
-        for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
+        for (i = lc->region_count; i % BITS_PER_LONG; i++)
                 log_clear_bit(lc, lc->clean_bits, i);

         /* copy clean across to sync */
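The old loop bound rounds region_count up only to the width of one clean_bits word, i.e. sizeof(*lc->clean_bits) << BYTE_SHIFT, which is 32 bits, while the bitmap is otherwise accessed with unsigned-long bit helpers that span 64 bits on a 64-bit kernel. After a shrink, stale set bits could therefore survive in the upper half of the last long. A small worked example of the arithmetic (userspace C, purely illustrative; region_count = 70 and the OLD_WORD_BITS macro are made-up stand-ins):

#include <stdio.h>

/*
 * Illustration of the two loop bounds, assuming a 64-bit unsigned long.
 * OLD_WORD_BITS stands in for sizeof(*lc->clean_bits) << BYTE_SHIFT.
 */
#define OLD_WORD_BITS 32
#define BITS_PER_LONG 64

int main(void)
{
        unsigned int region_count = 70;   /* hypothetical size after shrinking */
        unsigned int i, old_end, new_end;

        for (i = region_count; i % OLD_WORD_BITS; i++)
                ;
        old_end = i;                      /* 96: old code cleared bits 70..95 */

        for (i = region_count; i % BITS_PER_LONG; i++)
                ;
        new_end = i;                      /* 128: new code clears bits 70..127 */

        printf("old loop stops at bit %u, new loop stops at bit %u\n",
               old_end, new_end);
        printf("bits %u..%u of the last unsigned long were left uncleared\n",
               old_end, new_end - 1);
        return 0;
}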
@@ -594,6 +594,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
         atomic_set(&io->io_count, 2);
         this_cpu_inc(*md->pending_io);
         io->orig_bio = bio;
+        io->split_bio = NULL;
         io->md = md;
         spin_lock_init(&io->lock);
         io->start_time = jiffies;
@@ -887,7 +888,7 @@ static void dm_io_complete(struct dm_io *io)
 {
         blk_status_t io_error;
         struct mapped_device *md = io->md;
-        struct bio *bio = io->orig_bio;
+        struct bio *bio = io->split_bio ? io->split_bio : io->orig_bio;

         if (io->status == BLK_STS_DM_REQUEUE) {
                 unsigned long flags;
@@ -939,9 +940,11 @@ static void dm_io_complete(struct dm_io *io)
                         if (io_error == BLK_STS_AGAIN) {
                                 /* io_uring doesn't handle BLK_STS_AGAIN (yet) */
                                 queue_io(md, bio);
+                                return;
                         }
                 }
-                return;
+                if (io_error == BLK_STS_DM_REQUEUE)
+                        return;
         }

         if (bio_is_flush_with_data(bio)) {
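How the two dm_io_complete changes read together: a polled bio that hits BLK_STS_AGAIN is handed to queue_io() for a retry, so it must not also be completed (hence the added return), while a BLK_STS_AGAIN without polling now falls through to normal completion instead of being swallowed by the old unconditional return, which only the BLK_STS_DM_REQUEUE case should take. A simplified sketch of that decision; this is not the kernel function, and complete_or_requeue and requeue_bio_for_retry are hypothetical names standing in for DM's internal paths:

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Hypothetical stand-in for DM's queue_io() retry path. */
static void requeue_bio_for_retry(struct bio *bio)
{
        /* In DM this would hand the bio back to the deferred-I/O handling. */
}

/*
 * Simplified sketch: a bio that has been handed to a requeue path must not
 * also be completed, but a BLK_STS_AGAIN that is not being retried must
 * still reach bio_endio() so the submitter sees the error.
 */
static void complete_or_requeue(struct bio *bio, blk_status_t status)
{
        if (status == BLK_STS_DM_REQUEUE || status == BLK_STS_AGAIN) {
                if (bio->bi_opf & REQ_POLLED) {
                        /* A requeued split bio cannot be polled again. */
                        bio_clear_polled(bio);
                        if (status == BLK_STS_AGAIN) {
                                requeue_bio_for_retry(bio);
                                return;         /* requeued, do not complete */
                        }
                }
                if (status == BLK_STS_DM_REQUEUE)
                        return;                 /* requeue path owns the bio now */
                /* non-polled BLK_STS_AGAIN: fall through, complete with error */
        }

        bio->bi_status = status;
        bio_endio(bio);
}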
@@ -1691,9 +1694,11 @@ static void dm_split_and_process_bio(struct mapped_device *md,
          * Remainder must be passed to submit_bio_noacct() so it gets handled
          * *after* bios already submitted have been completely processed.
          */
-        bio_trim(bio, io->sectors, ci.sector_count);
-        trace_block_split(bio, bio->bi_iter.bi_sector);
-        bio_inc_remaining(bio);
+        WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT));
+        io->split_bio = bio_split(bio, io->sectors, GFP_NOIO,
+                                  &md->queue->bio_split);
+        bio_chain(io->split_bio, bio);
+        trace_block_split(io->split_bio, bio->bi_iter.bi_sector);
         submit_bio_noacct(bio);
 out:
         /*
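The remainder handling switches from bio_trim() plus bio_inc_remaining() to an explicit bio_split(): the front part that DM maps now is carved off and remembered in io->split_bio, it is chained to the remainder so the original submitter only sees completion once both halves are done, and the remainder goes back through submit_bio_noacct(). The generic split-and-chain pattern looks roughly like the sketch below (a generic block-driver style illustration, not DM's code; split_and_resubmit, max_sectors and the bio_set argument are placeholders):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Generic pattern: carve off the first max_sectors of the bio, chain the two
 * halves so the parent's completion waits for both, and resubmit the
 * remainder through the block layer.  Returns the front half for the caller
 * to map and complete itself.
 */
static struct bio *split_and_resubmit(struct bio *bio, unsigned int max_sectors,
                                      struct bio_set *bs)
{
        struct bio *front;

        if (bio_sectors(bio) <= max_sectors)
                return bio;                     /* small enough, no split needed */

        /* Front half: the piece the driver will handle right now. */
        front = bio_split(bio, max_sectors, GFP_NOIO, bs);

        /* Completion of the front half is propagated to the remainder. */
        bio_chain(front, bio);

        /* Remainder is re-queued and will be split again if still too big. */
        submit_bio_noacct(bio);

        return front;
}

Keeping a pointer to the front half, as the new io->split_bio field does above, appears to be what lets dm_io_complete() operate on the bio that was actually mapped rather than on the chained remainder.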