block: pass a block_device to bio_clone_fast

Pass a block_device to bio_clone_fast and __bio_clone_fast and give
the functions more suitable names.
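
For a typical caller the conversion looks like this (a minimal sketch with
illustrative variable names, not any one driver's code):

        /* before: clone first, then retarget the clone */
        clone = bio_clone_fast(src, GFP_NOIO, bs);
        if (clone)
                bio_set_dev(clone, bdev);

        /* after: the destination device is part of the clone call */
        clone = bio_alloc_clone(bdev, src, GFP_NOIO, bs);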

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Link: https://lore.kernel.org/r/20220202160109.108149-14-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Christoph Hellwig <hch@lst.de>  2022-02-02 17:01:09 +01:00
Committer: Jens Axboe
parent     a0e8de798d
commit     abfc426d1b
20 changed files with 80 additions and 80 deletions

Documentation/block/biodoc.rst

@@ -663,11 +663,6 @@ to i/o submission, if the bio fields are likely to be accessed after the
 i/o is issued (since the bio may otherwise get freed in case i/o completion
 happens in the meantime).
 
-The bio_clone_fast() routine may be used to duplicate a bio, where the clone
-shares the bio_vec_list with the original bio (i.e. both point to the
-same bio_vec_list). This would typically be used for splitting i/o requests
-in lvm or md.
-
 3.2 Generic bio helper Routines
 -------------------------------

block/bio.c

@@ -733,7 +733,8 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 	bio_set_flag(bio, BIO_CLONED);
 	if (bio_flagged(bio_src, BIO_THROTTLED))
 		bio_set_flag(bio, BIO_THROTTLED);
-	if (bio_flagged(bio_src, BIO_REMAPPED))
+	if (bio->bi_bdev == bio_src->bi_bdev &&
+	    bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
 	bio->bi_ioprio = bio_src->bi_ioprio;
 	bio->bi_write_hint = bio_src->bi_write_hint;
@@ -751,7 +752,8 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 }
 
 /**
- * bio_clone_fast - clone a bio that shares the original bio's biovec
+ * bio_alloc_clone - clone a bio that shares the original bio's biovec
+ * @bdev: block_device to clone onto
  * @bio_src: bio to clone from
  * @gfp: allocation priority
  * @bs: bio_set to allocate from
@@ -761,11 +763,12 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
  *
  * The caller must ensure that the return bio is not freed before @bio_src.
  */
-struct bio *bio_clone_fast(struct bio *bio_src, gfp_t gfp, struct bio_set *bs)
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+		gfp_t gfp, struct bio_set *bs)
 {
 	struct bio *bio;
 
-	bio = bio_alloc_bioset(bio_src->bi_bdev, 0, bio_src->bi_opf, gfp, bs);
+	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
 	if (!bio)
 		return NULL;
@@ -777,10 +780,11 @@ struct bio *bio_clone_fast(struct bio *bio_src, gfp_t gfp, struct bio_set *bs)
 
 	return bio;
 }
-EXPORT_SYMBOL(bio_clone_fast);
+EXPORT_SYMBOL(bio_alloc_clone);
 
 /**
- * __bio_clone_fast - clone a bio that shares the original bio's biovec
+ * bio_init_clone - clone a bio that shares the original bio's biovec
+ * @bdev: block_device to clone onto
  * @bio: bio to clone into
  * @bio_src: bio to clone from
  * @gfp: allocation priority
@@ -790,17 +794,18 @@ EXPORT_SYMBOL(bio_clone_fast);
  *
  * The caller must ensure that @bio_src is not freed before @bio.
  */
-int __bio_clone_fast(struct bio *bio, struct bio *bio_src, gfp_t gfp)
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+		struct bio *bio_src, gfp_t gfp)
 {
 	int ret;
 
-	bio_init(bio, bio_src->bi_bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
+	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
 	ret = __bio_clone(bio, bio_src, gfp);
 	if (ret)
 		bio_uninit(bio);
 	return ret;
 }
-EXPORT_SYMBOL(__bio_clone_fast);
+EXPORT_SYMBOL(bio_init_clone);
 
 const char *bio_devname(struct bio *bio, char *buf)
 {
@@ -1572,7 +1577,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
 		return NULL;
 
-	split = bio_clone_fast(bio, gfp, bs);
+	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
 	if (!split)
 		return NULL;
@@ -1667,9 +1672,9 @@ EXPORT_SYMBOL(bioset_exit);
  * Note that the bio must be embedded at the END of that structure always,
  * or things will break badly.
  * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
- * for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
- * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
- * dispatch queued requests when the mempool runs out of space.
+ * for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
+ * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
+ * to dispatch queued requests when the mempool runs out of space.
  *
  */
 int bioset_init(struct bio_set *bs,
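
A note on the __bio_clone() change above: BIO_REMAPPED records that bi_sector
has already been adjusted for the partition in bi_bdev, so the flag is only
meaningful while the bio stays on that device. Passing the destination device
into the clone helpers lets the core apply that rule itself; a condensed
sketch of the new logic:

        /* Inherit BIO_REMAPPED only if the clone targets the same
         * block_device; on any other device the remapped bi_sector
         * would be stale. */
        if (bio->bi_bdev == bio_src->bi_bdev &&
            bio_flagged(bio_src, BIO_REMAPPED))
                bio_set_flag(bio, BIO_REMAPPED);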

block/blk-mq.c

@@ -2975,10 +2975,10 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 		bs = &fs_bio_set;
 
 	__rq_for_each_bio(bio_src, rq_src) {
-		bio = bio_clone_fast(bio_src, gfp_mask, bs);
+		bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
+				      bs);
 		if (!bio)
 			goto free_and_out;
-		bio->bi_bdev = rq->q->disk->part0;
 
 		if (bio_ctr && bio_ctr(bio, bio_src, data))
 			goto free_and_out;

block/bounce.c

@@ -162,8 +162,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	 * that does not own the bio - reason being drivers don't use it for
 	 * iterating over the biovec anymore, so expecting it to be kept up
 	 * to date (i.e. for clones that share the parent biovec) is just
-	 * asking for trouble and would force extra work on
-	 * __bio_clone_fast() anyways.
+	 * asking for trouble and would force extra work.
 	 */
 	bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src),
 			bio_src->bi_opf, GFP_NOIO, &bounce_bio_set);

drivers/block/drbd/drbd_req.c

@@ -30,8 +30,8 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio
 		return NULL;
 	memset(req, 0, sizeof(*req));
 
-	req->private_bio = bio_clone_fast(bio_src, GFP_NOIO, &drbd_io_bio_set);
-	bio_set_dev(req->private_bio, device->ldev->backing_bdev);
+	req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src,
+					   GFP_NOIO, &drbd_io_bio_set);
 	req->private_bio->bi_private = req;
 	req->private_bio->bi_end_io = drbd_request_endio;

drivers/block/drbd/drbd_worker.c

@@ -1523,9 +1523,9 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
 	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
 		drbd_al_begin_io(device, &req->i);
 
-	req->private_bio = bio_clone_fast(req->master_bio, GFP_NOIO,
+	req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
+					   req->master_bio, GFP_NOIO,
 					   &drbd_io_bio_set);
-	bio_set_dev(req->private_bio, device->ldev->backing_bdev);
 	req->private_bio->bi_private = req;
 	req->private_bio->bi_end_io = drbd_request_endio;
 	submit_bio_noacct(req->private_bio);

drivers/block/pktcdvd.c

@@ -2294,12 +2294,12 @@ static void pkt_end_io_read_cloned(struct bio *bio)
 
 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-	struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, &pkt_bio_set);
+	struct bio *cloned_bio =
+		bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
 	struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
 
 	psd->pd = pd;
 	psd->bio = bio;
-	bio_set_dev(cloned_bio, pd->bdev);
 	cloned_bio->bi_private = psd;
 	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
 	pd->stats.secs_r += bio_sectors(bio);

drivers/md/bcache/request.c

@@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
 {
 	struct bio *bio = &s->bio.bio;
 
-	__bio_clone_fast(bio, orig_bio, GFP_NOIO);
+	bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
 	/*
 	 * bi_end_io can be set separately somewhere else, e.g. the
 	 * variants in,
@@ -1036,7 +1036,8 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			closure_bio_submit(s->iop.c, flush, cl);
 		}
 	} else {
-		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
+		s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+					     &dc->disk.bio_split);
 		/* I/O request sent to backing device */
 		bio->bi_end_io = backing_request_endio;
 		closure_bio_submit(s->iop.c, bio, cl);

drivers/md/dm-cache-target.c

@@ -819,13 +819,13 @@ static void issue_op(struct bio *bio, void *context)
 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
 				      dm_oblock_t oblock, dm_cblock_t cblock)
 {
-	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
+	struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
+						 GFP_NOIO, &cache->bs);
 
 	BUG_ON(!origin_bio);
 
 	bio_chain(origin_bio, bio);
-	remap_to_origin(cache, origin_bio);
 	if (bio_data_dir(origin_bio) == WRITE)
 		clear_discard(cache, oblock_to_dblock(cache, oblock));
 	submit_bio(origin_bio);

drivers/md/dm-crypt.c

@@ -1834,17 +1834,16 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	struct bio *clone;
 
 	/*
-	 * We need the original biovec array in order to decrypt
-	 * the whole bio data *afterwards* -- thanks to immutable
-	 * biovecs we don't need to worry about the block layer
-	 * modifying the biovec array; so leverage bio_clone_fast().
+	 * We need the original biovec array in order to decrypt the whole bio
+	 * data *afterwards* -- thanks to immutable biovecs we don't need to
+	 * worry about the block layer modifying the biovec array; so leverage
+	 * bio_alloc_clone().
 	 */
-	clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
+	clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
 	if (!clone)
 		return 1;
 
 	clone->bi_private = io;
 	clone->bi_end_io = crypt_endio;
-	bio_set_dev(clone, cc->dev->bdev);
 
 	crypt_inc_pending(io);

drivers/md/dm-zoned-target.c

@@ -125,11 +125,10 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
 	if (dev->flags & DMZ_BDEV_DYING)
 		return -EIO;
 
-	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
+	clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
 	if (!clone)
 		return -ENOMEM;
 
-	bio_set_dev(clone, dev->bdev);
 	bioctx->dev = dev;
 	clone->bi_iter.bi_sector =
 		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);

drivers/md/dm.c

@@ -520,7 +520,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 	struct dm_target_io *tio;
 	struct bio *clone;
 
-	clone = bio_clone_fast(bio, GFP_NOIO, &md->io_bs);
+	clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);
 
 	tio = clone_to_tio(clone);
 	tio->inside_dm_io = true;
@@ -553,8 +553,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 		/* the dm_target_io embedded in ci->io is available */
 		tio = &ci->io->tio;
 	} else {
-		struct bio *clone = bio_clone_fast(ci->bio, gfp_mask,
-						   &ci->io->md->bs);
+		struct bio *clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
+						    gfp_mask, &ci->io->md->bs);
 
 		if (!clone)
 			return NULL;

drivers/md/md-faulty.c

@@ -205,9 +205,9 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
 		}
 	}
 	if (failit) {
-		struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+		struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO,
+						&mddev->bio_set);
 
-		bio_set_dev(b, conf->rdev->bdev);
 		b->bi_private = bio;
 		b->bi_end_io = faulty_fail;
 		bio = b;

drivers/md/md-multipath.c

@@ -121,10 +121,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
-	__bio_clone_fast(&mp_bh->bio, bio, GFP_NOIO);
+	bio_init_clone(multipath->rdev->bdev, &mp_bh->bio, bio, GFP_NOIO);
 
 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
-	bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
 	mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;

drivers/md/md.c

@@ -8634,13 +8634,14 @@ static void md_end_io_acct(struct bio *bio)
  */
 void md_account_bio(struct mddev *mddev, struct bio **bio)
 {
+	struct block_device *bdev = (*bio)->bi_bdev;
 	struct md_io_acct *md_io_acct;
 	struct bio *clone;
 
-	if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
+	if (!blk_queue_io_stat(bdev->bd_disk->queue))
 		return;
 
-	clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
+	clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
 	md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
 	md_io_acct->orig_bio = *bio;
 	md_io_acct->start_time = bio_start_io_acct(*bio);

drivers/md/raid1.c

@@ -1320,13 +1320,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
 		r1_bio->start_time = bio_start_io_acct(bio);
 
-	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+	read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
+				   &mddev->bio_set);
 
 	r1_bio->bios[rdisk] = read_bio;
 
 	read_bio->bi_iter.bi_sector = r1_bio->sector +
 		mirror->rdev->data_offset;
-	bio_set_dev(read_bio, mirror->rdev->bdev);
 	read_bio->bi_end_io = raid1_end_read_request;
 	bio_set_op_attrs(read_bio, op, do_sync);
 	if (test_bit(FailFast, &mirror->rdev->flags) &&
@@ -1546,24 +1546,25 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			first_clone = 0;
 		}
 
-		if (r1_bio->behind_master_bio)
-			mbio = bio_clone_fast(r1_bio->behind_master_bio,
-					      GFP_NOIO, &mddev->bio_set);
-		else
-			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
-
 		if (r1_bio->behind_master_bio) {
+			mbio = bio_alloc_clone(rdev->bdev,
+					       r1_bio->behind_master_bio,
+					       GFP_NOIO, &mddev->bio_set);
 			if (test_bit(CollisionCheck, &rdev->flags))
 				wait_for_serialization(rdev, r1_bio);
 			if (test_bit(WriteMostly, &rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
-		} else if (mddev->serialize_policy)
-			wait_for_serialization(rdev, r1_bio);
+		} else {
+			mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+					       &mddev->bio_set);
+			if (mddev->serialize_policy)
+				wait_for_serialization(rdev, r1_bio);
+		}
 
 		r1_bio->bios[i] = mbio;
 
 		mbio->bi_iter.bi_sector	= (r1_bio->sector + rdev->data_offset);
-		bio_set_dev(mbio, rdev->bdev);
 		mbio->bi_end_io	= raid1_end_write_request;
 		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
 		if (test_bit(FailFast, &rdev->flags) &&
@@ -2416,12 +2417,12 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 		/* Write at 'sector' for 'sectors'*/
 
 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
-			wbio = bio_clone_fast(r1_bio->behind_master_bio,
-					      GFP_NOIO,
-					      &mddev->bio_set);
+			wbio = bio_alloc_clone(rdev->bdev,
+					       r1_bio->behind_master_bio,
+					       GFP_NOIO, &mddev->bio_set);
 		} else {
-			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
-					      &mddev->bio_set);
+			wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
+					       GFP_NOIO, &mddev->bio_set);
 		}
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
@@ -2430,7 +2431,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 
 		bio_trim(wbio, sector - r1_bio->sector, sectors);
 		wbio->bi_iter.bi_sector += rdev->data_offset;
-		bio_set_dev(wbio, rdev->bdev);
 
 		if (submit_bio_wait(wbio) < 0)
 			/* failure! */

drivers/md/raid10.c

@@ -1208,14 +1208,13 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
 		r10_bio->start_time = bio_start_io_acct(bio);
 
-	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
+	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
 
 	r10_bio->devs[slot].bio = read_bio;
 	r10_bio->devs[slot].rdev = rdev;
 
 	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
 		choose_data_offset(r10_bio, rdev);
-	bio_set_dev(read_bio, rdev->bdev);
 	read_bio->bi_end_io = raid10_end_read_request;
 	bio_set_op_attrs(read_bio, op, do_sync);
 	if (test_bit(FailFast, &rdev->flags) &&
@@ -1255,7 +1254,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 	} else
 		rdev = conf->mirrors[devnum].rdev;
 
-	mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+	mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
 	if (replacement)
 		r10_bio->devs[n_copy].repl_bio = mbio;
 	else
@@ -1263,7 +1262,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 
 	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
 			   choose_data_offset(r10_bio, rdev));
-	bio_set_dev(mbio, rdev->bdev);
 	mbio->bi_end_io	= raid10_end_write_request;
 	bio_set_op_attrs(mbio, op, do_sync | do_fua);
 	if (!replacement && test_bit(FailFast,
@@ -1812,7 +1810,8 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 		 */
 		if (r10_bio->devs[disk].bio) {
 			struct md_rdev *rdev = conf->mirrors[disk].rdev;
-			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+			mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+					       &mddev->bio_set);
 			mbio->bi_end_io = raid10_end_discard_request;
 			mbio->bi_private = r10_bio;
 			r10_bio->devs[disk].bio = mbio;
@@ -1825,7 +1824,8 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 		}
 		if (r10_bio->devs[disk].repl_bio) {
 			struct md_rdev *rrdev = conf->mirrors[disk].replacement;
-			rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+			rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+					       &mddev->bio_set);
 			rbio->bi_end_io = raid10_end_discard_request;
 			rbio->bi_private = r10_bio;
 			r10_bio->devs[disk].repl_bio = rbio;
@@ -2892,12 +2892,12 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 		if (sectors > sect_to_write)
 			sectors = sect_to_write;
 		/* Write at 'sector' for 'sectors' */
-		wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+		wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
+				       &mddev->bio_set);
 		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
 		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
 		wbio->bi_iter.bi_sector = wsector +
 			choose_data_offset(r10_bio, rdev);
-		bio_set_dev(wbio, rdev->bdev);
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 
 		if (submit_bio_wait(wbio) < 0)

drivers/md/raid5.c

@@ -5438,14 +5438,14 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 		return 0;
 	}
 
-	align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->io_acct_set);
+	align_bio = bio_alloc_clone(rdev->bdev, raid_bio, GFP_NOIO,
+				    &mddev->io_acct_set);
 	md_io_acct = container_of(align_bio, struct md_io_acct, bio_clone);
 	raid_bio->bi_next = (void *)rdev;
 	if (blk_queue_io_stat(raid_bio->bi_bdev->bd_disk->queue))
 		md_io_acct->start_time = bio_start_io_acct(raid_bio);
 	md_io_acct->orig_bio = raid_bio;
 
-	bio_set_dev(align_bio, rdev->bdev);
 	align_bio->bi_end_io = raid5_align_endio;
 	align_bio->bi_private = md_io_acct;
 	align_bio->bi_iter.bi_sector = sector;

fs/btrfs/extent_io.c

@@ -3154,7 +3154,7 @@ struct bio *btrfs_bio_clone(struct bio *bio)
 	struct bio *new;
 
 	/* Bio allocation backed by a bioset does not fail */
-	new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
+	new = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOFS, &btrfs_bioset);
 	bbio = btrfs_bio(new);
 	btrfs_bio_init(bbio);
 	bbio->iter = bio->bi_iter;
@@ -3169,7 +3169,7 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
 	ASSERT(offset <= UINT_MAX && size <= UINT_MAX);
 
 	/* this will never fail when it's backed by a bioset */
-	bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
+	bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
 	ASSERT(bio);
 
 	bbio = btrfs_bio(bio);

include/linux/bio.h

@@ -413,8 +413,10 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
 extern void bio_put(struct bio *);
 
-int __bio_clone_fast(struct bio *bio, struct bio *bio_src, gfp_t gfp);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+		gfp_t gfp, struct bio_set *bs);
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+		struct bio *bio_src, gfp_t gfp);
 
 extern struct bio_set fs_bio_set;
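
The header now pairs an allocating variant with an in-place one. A minimal
sketch of the in-place style used by the bcache and md-multipath hunks above
(struct and function names here are hypothetical, not from the patch):

        struct my_req {
                void            *private;
                struct bio      bio;    /* embedded clone target */
        };

        static int my_req_setup(struct my_req *req, struct block_device *bdev,
                                struct bio *src)
        {
                /* No allocation: req->bio becomes a clone of @src, already
                 * pointing at @bdev and sharing @src's biovec (which is why,
                 * per the bioset_init() comment, no separate bvec pool is
                 * needed for such clones). */
                return bio_init_clone(bdev, &req->bio, src, GFP_NOIO);
        }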