bcachefs: Kill direct access to bi_io_vec

Switch to always using bio_add_page(), which merges contiguous pages now
that we have multipage bvecs.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author:    Kent Overstreet, 2019-07-03 19:27:42 -04:00
Committer: Kent Overstreet
Parent:    738540f7fc
Commit:    885678f68d

11 changed files with 58 additions and 125 deletions
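For readers skimming the diff: the removed pattern wrote bio_vec entries by hand and patched up bi_vcnt and bi_iter.bi_size itself, while the replacement hands each page to bio_add_page(), which maintains those fields and, with multipage bvecs, merges physically contiguous pages into a single segment. A minimal sketch of the two styles, not code from this commit — fill_bio_old/fill_bio_new are illustrative names for a caller mapping a linear buffer:

	#include <linux/bio.h>
	#include <linux/mm.h>

	/* Old style: poke bi_io_vec directly and maintain the counters by
	 * hand -- exactly the pattern this commit removes: */
	static void fill_bio_old(struct bio *bio, void *buf, size_t len)
	{
		while (len) {
			struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];

			bv->bv_page	= virt_to_page(buf);
			bv->bv_offset	= offset_in_page(buf);
			bv->bv_len	= min_t(size_t, PAGE_SIZE - bv->bv_offset, len);

			bio->bi_iter.bi_size += bv->bv_len;
			buf += bv->bv_len;
			len -= bv->bv_len;
		}
	}

	/* New style: bio_add_page() updates bi_vcnt/bi_iter.bi_size itself
	 * and merges contiguous pages into one multipage bvec: */
	static void fill_bio_new(struct bio *bio, void *buf, size_t len)
	{
		while (len) {
			unsigned offset	= offset_in_page(buf);
			unsigned bytes	= min_t(size_t, PAGE_SIZE - offset, len);

			BUG_ON(!bio_add_page(bio, virt_to_page(buf), bytes, offset));
			buf += bytes;
			len -= bytes;
		}
	}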


@@ -1037,10 +1037,9 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	rb->pick		= pick;
 	INIT_WORK(&rb->work, btree_node_read_work);
 	bio->bi_iter.bi_sector	= pick.ptr.offset;
-	bio->bi_iter.bi_size	= btree_bytes(c);
 	bio->bi_end_io		= btree_node_read_endio;
 	bio->bi_private		= b;
-	bch2_bio_map(bio, b->data);
+	bch2_bio_map(bio, b->data, btree_bytes(c));
 
 	set_btree_node_read_in_flight(b);
@@ -1502,11 +1501,10 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 	wbio->data			= data;
 	wbio->wbio.order		= order;
 	wbio->wbio.used_mempool		= used_mempool;
-	wbio->wbio.bio.bi_iter.bi_size	= sectors_to_write << 9;
 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
 	wbio->wbio.bio.bi_private	= b;
-	bch2_bio_map(&wbio->wbio.bio, data);
+	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
 
 	/*
 	 * If we're appending to a leaf node, we don't technically need FUA -


@@ -244,7 +244,16 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
 	 * might have to free existing pages and retry allocation from mempool -
 	 * do this _after_ decompressing:
 	 */
-	bch2_bio_alloc_more_pages_pool(c, bio, crc->live_size << 9);
+	if (bio->bi_iter.bi_size < crc->live_size << 9) {
+		if (bch2_bio_alloc_pages(bio, (crc->live_size << 9) -
+					 bio->bi_iter.bi_size,
+					 GFP_NOFS)) {
+			bch2_bio_free_pages_pool(c, bio);
+			bio->bi_iter.bi_size = 0;
+			bio->bi_vcnt	     = 0;
+			bch2_bio_alloc_pages_pool(c, bio, crc->live_size << 9);
+		}
+	}
 
 	memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));


@@ -70,8 +70,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 		      GFP_NOIO,
 		      &c->btree_bio);
 	bio->bi_iter.bi_sector	= pick.ptr.offset;
-	bio->bi_iter.bi_size	= btree_bytes(c);
-	bch2_bio_map(bio, n_sorted);
+	bch2_bio_map(bio, n_sorted, btree_bytes(c));
 
 	submit_bio_wait(bio);


@@ -399,11 +399,10 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
 		ec_bio->idx			= idx;
 		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
-		ec_bio->bio.bi_iter.bi_size	= b;
 		ec_bio->bio.bi_end_io		= ec_block_endio;
 		ec_bio->bio.bi_private		= cl;
-		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset);
+		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
 
 		closure_get(cl);
 		percpu_ref_get(&ca->io_ref);


@@ -775,7 +775,7 @@ static int bio_add_page_contig(struct bio *bio, struct page *page)
 	else if (!bio_can_add_page_contig(bio, page))
 		return -1;
 
-	__bio_add_page(bio, page, PAGE_SIZE, 0);
+	BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
 	return 0;
 }
@@ -913,7 +913,7 @@ static void readpage_bio_extend(struct readpages_iter *iter,
 			put_page(page);
 		}
 
-		__bio_add_page(bio, page, PAGE_SIZE, 0);
+		BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
 	}
 }
@@ -1025,7 +1025,7 @@ void bch2_readahead(struct readahead_control *ractl)
 		rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
 		rbio->bio.bi_end_io = bch2_readpages_end_io;
-		__bio_add_page(&rbio->bio, page, PAGE_SIZE, 0);
+		BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
 
 		bchfs_read(&trans, iter, rbio, inode->v.i_ino,
 			   &readpages_iter);


@@ -141,14 +141,13 @@ void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
 	bio->bi_vcnt = 0;
 }
 
-static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
-				     bool *using_mempool)
+static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
 {
-	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
+	struct page *page;
 
 	if (likely(!*using_mempool)) {
-		bv->bv_page = alloc_page(GFP_NOIO);
-		if (unlikely(!bv->bv_page)) {
+		page = alloc_page(GFP_NOIO);
+		if (unlikely(!page)) {
 			mutex_lock(&c->bio_bounce_pages_lock);
 			*using_mempool = true;
 			goto pool_alloc;
@@ -156,57 +155,29 @@ static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
 		}
 	} else {
 pool_alloc:
-		bv->bv_page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
+		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
 	}
 
-	bv->bv_len = PAGE_SIZE;
-	bv->bv_offset = 0;
+	return page;
 }
 
 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
-			       size_t bytes)
+			       size_t size)
 {
 	bool using_mempool = false;
 
-	BUG_ON(DIV_ROUND_UP(bytes, PAGE_SIZE) > bio->bi_max_vecs);
+	while (size) {
+		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
+		unsigned len = min(PAGE_SIZE, size);
 
-	bio->bi_iter.bi_size = bytes;
-	while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
-		bch2_bio_alloc_page_pool(c, bio, &using_mempool);
+		BUG_ON(!bio_add_page(bio, page, len, 0));
+		size -= len;
+	}
 
 	if (using_mempool)
 		mutex_unlock(&c->bio_bounce_pages_lock);
 }
 
-void bch2_bio_alloc_more_pages_pool(struct bch_fs *c, struct bio *bio,
-				    size_t bytes)
-{
-	while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) {
-		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
-
-		BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
-
-		bv->bv_page = alloc_page(GFP_NOIO);
-		if (!bv->bv_page) {
-			/*
-			 * We already allocated from mempool, we can't allocate from it again
-			 * without freeing the pages we already allocated or else we could
-			 * deadlock:
-			 */
-			bch2_bio_free_pages_pool(c, bio);
-			bch2_bio_alloc_pages_pool(c, bio, bytes);
-			return;
-		}
-
-		bv->bv_len = PAGE_SIZE;
-		bv->bv_offset = 0;
-		bio->bi_vcnt++;
-	}
-
-	bio->bi_iter.bi_size = bytes;
-}
-
 /* Writes */
 
 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
@@ -491,8 +462,7 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
 	wbio->bio.bi_opf	= src->bi_opf;
 
 	if (buf) {
-		bio->bi_iter.bi_size = output_available;
-		bch2_bio_map(bio, buf);
+		bch2_bio_map(bio, buf, output_available);
 		return bio;
 	}
@@ -502,31 +472,17 @@
 	 * We can't use mempool for more than c->sb.encoded_extent_max
 	 * worth of pages, but we'd like to allocate more if we can:
 	 */
-	while (bio->bi_iter.bi_size < output_available) {
-		unsigned len = min_t(unsigned, PAGE_SIZE,
-				     output_available - bio->bi_iter.bi_size);
-		struct page *p;
+	bch2_bio_alloc_pages_pool(c, bio,
+				  min_t(unsigned, output_available,
+					c->sb.encoded_extent_max << 9));
 
-		p = alloc_page(GFP_NOIO);
-		if (!p) {
-			unsigned pool_max =
-				min_t(unsigned, output_available,
-				      c->sb.encoded_extent_max << 9);
+	if (bio->bi_iter.bi_size < output_available)
+		*page_alloc_failed =
+			bch2_bio_alloc_pages(bio,
+					     output_available -
+					     bio->bi_iter.bi_size,
+					     GFP_NOFS) != 0;
 
-			if (bio_sectors(bio) < pool_max)
-				bch2_bio_alloc_pages_pool(c, bio, pool_max);
-
-			break;
-		}
-
-		bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) {
-			.bv_page	= p,
-			.bv_len		= len,
-			.bv_offset	= 0,
-		};
-
-		bio->bi_iter.bi_size += len;
-	}
-
-	*page_alloc_failed = bio->bi_vcnt < pages;
 
 	return bio;
 }
@@ -830,12 +786,6 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp)
 	}
 
 	dst->bi_iter.bi_size = total_output;
 
-	/* Free unneeded pages after compressing: */
-	if (to_wbio(dst)->bounce)
-		while (dst->bi_vcnt > DIV_ROUND_UP(dst->bi_iter.bi_size, PAGE_SIZE))
-			mempool_free(dst->bi_io_vec[--dst->bi_vcnt].bv_page,
-				     &c->bio_bounce_pages);
-
 do_write:
 	/* might have done a realloc... */


@@ -13,7 +13,6 @@
 void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
 void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
-void bch2_bio_alloc_more_pages_pool(struct bch_fs *, struct bio *, size_t);
 
 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
 void bch2_latency_acct(struct bch_dev *, u64, int);


@@ -494,9 +494,8 @@ reread:
 			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
 			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
-			bio->bi_iter.bi_sector	= offset;
-			bio->bi_iter.bi_size	= sectors_read << 9;
-			bch2_bio_map(bio, buf->data);
+			bio->bi_iter.bi_sector = offset;
+			bch2_bio_map(bio, buf->data, sectors_read << 9);
 
 			ret = submit_bio_wait(bio);
 			kfree(bio);
@@ -1086,10 +1085,9 @@ void bch2_journal_write(struct closure *cl)
 		bio_reset(bio, ca->disk_sb.bdev,
 			  REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
 		bio->bi_iter.bi_sector	= ptr->offset;
-		bio->bi_iter.bi_size	= sectors << 9;
 		bio->bi_end_io		= journal_write_endio;
 		bio->bi_private		= ca;
-		bch2_bio_map(bio, jset);
+		bch2_bio_map(bio, jset, sectors << 9);
 
 		trace_journal_write(bio);
 		closure_bio_submit(bio, cl);


@@ -476,8 +476,7 @@ static const char *read_one_super(struct bch_sb_handle *sb, u64 offset)
 reread:
 	bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
 	sb->bio->bi_iter.bi_sector = offset;
-	sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order;
-	bch2_bio_map(sb->bio, sb->sb);
+	bch2_bio_map(sb->bio, sb->sb, PAGE_SIZE << sb->page_order);
 
 	if (submit_bio_wait(sb->bio))
 		return "IO error";
@@ -582,12 +581,11 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
 	 */
 	bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
 	sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
-	sb->bio->bi_iter.bi_size = sizeof(struct bch_sb_layout);
 	/*
 	 * use sb buffer to read layout, since sb buffer is page aligned but
 	 * layout won't be:
 	 */
-	bch2_bio_map(sb->bio, sb->sb);
+	bch2_bio_map(sb->bio, sb->sb, sizeof(struct bch_sb_layout));
 
 	err = "IO error";
 	if (submit_bio_wait(sb->bio))
@@ -653,10 +651,9 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
 	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
 	bio->bi_iter.bi_sector	= le64_to_cpu(sb->layout.sb_offset[0]);
-	bio->bi_iter.bi_size	= PAGE_SIZE;
 	bio->bi_end_io		= write_super_endio;
 	bio->bi_private		= ca;
-	bch2_bio_map(bio, ca->sb_read_scratch);
+	bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
 
 	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_SB],
 		     bio_sectors(bio));
@@ -678,12 +675,11 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
 	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
 	bio->bi_iter.bi_sector	= le64_to_cpu(sb->offset);
-	bio->bi_iter.bi_size	=
-		roundup((size_t) vstruct_bytes(sb),
-			bdev_logical_block_size(ca->disk_sb.bdev));
 	bio->bi_end_io		= write_super_endio;
 	bio->bi_private		= ca;
-	bch2_bio_map(bio, sb);
+	bch2_bio_map(bio, sb,
+		     roundup((size_t) vstruct_bytes(sb),
+			     bdev_logical_block_size(ca->disk_sb.bdev)));
 
 	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
 		     bio_sectors(bio));


@@ -506,33 +506,18 @@ size_t bch2_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
 
 /* misc: */
 
-void bch2_bio_map(struct bio *bio, void *base)
+void bch2_bio_map(struct bio *bio, void *base, size_t size)
 {
-	size_t size = bio->bi_iter.bi_size;
-	struct bio_vec *bv = bio->bi_io_vec;
-
-	BUG_ON(!bio->bi_iter.bi_size);
-	BUG_ON(bio->bi_vcnt);
-	BUG_ON(!bio->bi_max_vecs);
-
-	bv->bv_offset = base ? offset_in_page(base) : 0;
-	goto start;
-
-	for (; size; bio->bi_vcnt++, bv++) {
-		BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
-
-		bv->bv_offset	= 0;
-start:		bv->bv_len	= min_t(size_t, PAGE_SIZE - bv->bv_offset,
-					size);
-		if (base) {
-			bv->bv_page = is_vmalloc_addr(base)
+	while (size) {
+		struct page *page = is_vmalloc_addr(base)
 				? vmalloc_to_page(base)
 				: virt_to_page(base);
+		unsigned offset = offset_in_page(base);
+		unsigned len = min_t(size_t, PAGE_SIZE - offset, size);
 
-			base += bv->bv_len;
-		}
-
-		size -= bv->bv_len;
+		BUG_ON(!bio_add_page(bio, page, len, offset));
+		size -= len;
+		base += len;
 	}
 }
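With the rewrite above, bch2_bio_map() no longer pre-seeds bi_iter.bi_size and walks bi_io_vec; it is a thin loop over bio_add_page(), so callers pass the size explicitly, as every other hunk in this commit shows. A hedged usage sketch under the new signature — bdev, buf, len, and sector here are placeholder names, not code from this commit:

	/* The bio must be allocated with room for the pages; bio_add_page()
	 * then grows bi_vcnt and bi_iter.bi_size as bch2_bio_map() feeds
	 * it one page at a time: */
	bio = bio_alloc_bioset(bdev, DIV_ROUND_UP(len, PAGE_SIZE),
			       REQ_OP_READ, GFP_NOIO, &c->btree_bio);
	bio->bi_iter.bi_sector = sector;
	bch2_bio_map(bio, buf, len);	/* size is now an explicit argument */
	submit_bio_wait(bio);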


@@ -503,7 +503,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 	return x;
 }
 
-void bch2_bio_map(struct bio *bio, void *base);
+void bch2_bio_map(struct bio *bio, void *base, size_t);
 int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
 
 static inline sector_t bdev_sectors(struct block_device *bdev)