bcache: Convert bch_btree_search_async() to bch_btree_map_keys()

This is a fairly straightforward conversion, mostly reshuffling -
op->lookup_done goes away, replaced by MAP_DONE/MAP_CONTINUE. And the
code for handling cache hits and misses wasn't really btree code, so it
gets moved to request.c.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Kent Overstreet 2013-07-24 17:41:08 -07:00
parent df8e89701f
commit 2c1953e201
5 changed files with 125 additions and 168 deletions
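
Before the diff, a note on the convention being adopted: bch_btree_map_keys() walks the btree from a starting key and hands each key to a callback, and the callback's return value (rather than a flag like op->lookup_done on shared state) tells the walk whether to stop or continue, with negative errors such as -EAGAIN propagating out. Below is a minimal userspace sketch of that contract — toy types and illustrative MAP_DONE/MAP_CONTINUE values, not the kernel implementation:

#include <stdio.h>

/* Illustrative stand-ins for bcache's map return codes. */
#define MAP_DONE	0	/* callback is finished: stop the walk */
#define MAP_CONTINUE	1	/* visit the next key */

struct key { unsigned inode; unsigned long long offset; };

typedef int (*map_keys_fn)(struct key *k, void *arg);

/*
 * Toy map_keys(): visit a sorted key array from 'start', calling fn on
 * each key until it returns MAP_DONE or a negative error (in the
 * kernel, -EAGAIN means "requeue and retry the whole lookup").
 */
static int map_keys(struct key *keys, int nr, int start,
		    map_keys_fn fn, void *arg)
{
	int ret = MAP_CONTINUE;

	for (int i = start; i < nr && ret == MAP_CONTINUE; i++)
		ret = fn(&keys[i], arg);

	return ret == MAP_CONTINUE ? MAP_DONE : ret;
}

/* Example callback: stop once a key reaches the target offset. */
static int lookup_fn(struct key *k, void *arg)
{
	unsigned long long target = *(unsigned long long *)arg;

	printf("visiting key %u:%llu\n", k->inode, k->offset);
	return k->offset >= target ? MAP_DONE : MAP_CONTINUE;
}

int main(void)
{
	struct key keys[] = { {1, 8}, {1, 16}, {1, 32}, {1, 64} };
	unsigned long long target = 30;

	map_keys(keys, 4, 0, lookup_fn, &target);
	return 0;
}

In the patch itself, submit_partial_cache_hit() plays the role of lookup_fn here, and cache_lookup() requeues itself when the walk returns -EAGAIN.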

--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c

@@ -23,7 +23,6 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 #include "writeback.h"
 
 #include <linux/slab.h>
@@ -2255,138 +2254,6 @@ void bch_btree_set_root(struct btree *b)
 	closure_sync(&cl);
 }
 
-/* Cache lookup */
-
-static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
-				     struct bkey *k)
-{
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = &s->bio.bio;
-	int ret = 0;
-
-	while (!ret &&
-	       !op->lookup_done) {
-		unsigned sectors = INT_MAX;
-
-		if (KEY_INODE(k) == op->inode) {
-			if (KEY_START(k) <= bio->bi_sector)
-				break;
-
-			sectors = min_t(uint64_t, sectors,
-					KEY_START(k) - bio->bi_sector);
-		}
-
-		ret = s->d->cache_miss(b, s, bio, sectors);
-	}
-
-	return ret;
-}
-
-/*
- * Read from a single key, handling the initial cache miss if the key starts in
- * the middle of the bio
- */
-static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
-				    struct bkey *k)
-{
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = &s->bio.bio;
-	unsigned ptr;
-	struct bio *n;
-
-	int ret = submit_partial_cache_miss(b, op, k);
-	if (ret || op->lookup_done)
-		return ret;
-
-	/* XXX: figure out best pointer - for multiple cache devices */
-	ptr = 0;
-
-	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
-
-	while (!op->lookup_done &&
-	       KEY_INODE(k) == op->inode &&
-	       bio->bi_sector < KEY_OFFSET(k)) {
-		struct bkey *bio_key;
-		sector_t sector = PTR_OFFSET(k, ptr) +
-			(bio->bi_sector - KEY_START(k));
-		unsigned sectors = min_t(uint64_t, INT_MAX,
-					 KEY_OFFSET(k) - bio->bi_sector);
-
-		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-		if (n == bio)
-			op->lookup_done = true;
-
-		bio_key = &container_of(n, struct bbio, bio)->key;
-
-		/*
-		 * The bucket we're reading from might be reused while our bio
-		 * is in flight, and we could then end up reading the wrong
-		 * data.
-		 *
-		 * We guard against this by checking (in cache_read_endio()) if
-		 * the pointer is stale again; if so, we treat it as an error
-		 * and reread from the backing device (but we don't pass that
-		 * error up anywhere).
-		 */
-
-		bch_bkey_copy_single_ptr(bio_key, k, ptr);
-		SET_PTR_OFFSET(bio_key, 0, sector);
-
-		n->bi_end_io	= bch_cache_read_endio;
-		n->bi_private	= &s->cl;
-
-		__bch_submit_bbio(n, b->c);
-	}
-
-	return 0;
-}
-
-static int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
-{
-	struct search *s = container_of(op, struct search, op);
-	struct bio *bio = &s->bio.bio;
-
-	int ret = 0;
-	struct bkey *k;
-	struct btree_iter iter;
-	bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
-
-	do {
-		k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-		if (!k) {
-			/*
-			 * b->key would be exactly what we want, except that
-			 * pointers to btree nodes have nonzero size - we
-			 * wouldn't go far enough
-			 */
-
-			ret = submit_partial_cache_miss(b, op,
-					&KEY(KEY_INODE(&b->key),
-					     KEY_OFFSET(&b->key), 0));
-			break;
-		}
-
-		ret = b->level
-			? btree(search_recurse, k, b, op)
-			: submit_partial_cache_hit(b, op, k);
-	} while (!ret &&
-		 !op->lookup_done);
-
-	return ret;
-}
-
-void bch_btree_search_async(struct closure *cl)
-{
-	struct btree_op *op = container_of(cl, struct btree_op, cl);
-
-	int ret = btree_root(search_recurse, op->c, op);
-
-	if (ret == -EAGAIN)
-		continue_at(cl, bch_btree_search_async, bcache_wq);
-
-	closure_return(cl);
-}
-
 /* Map across nodes or keys */
 
 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,

--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h

@@ -264,7 +264,6 @@ struct btree_op {
 	unsigned		flush_journal:1;
 
 	unsigned		insert_data_done:1;
-	unsigned		lookup_done:1;
 	unsigned		insert_collision:1;
 
 	BKEY_PADDED(replace);
@@ -306,8 +305,6 @@ int bch_btree_insert_check_key(struct btree *, struct btree_op *,
 				   struct bkey *);
 int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);
 
-void bch_btree_search_async(struct closure *);
-
 int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
 void bch_moving_gc(struct cache_set *);

--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c

@@ -7,7 +7,6 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 
 #include <trace/events/bcache.h>

--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c

@@ -638,24 +638,9 @@ void bch_data_insert(struct closure *cl)
 	bch_data_insert_start(cl);
 }
 
-/* Common code for the make_request functions */
-
-static void request_endio(struct bio *bio, int error)
-{
-	struct closure *cl = bio->bi_private;
-
-	if (error) {
-		struct search *s = container_of(cl, struct search, cl);
-		s->error = error;
-		/* Only cache read errors are recoverable */
-		s->recoverable = false;
-	}
-
-	bio_put(bio);
-	closure_put(cl);
-}
-
-void bch_cache_read_endio(struct bio *bio, int error)
+/* Cache lookup */
+
+static void bch_cache_read_endio(struct bio *bio, int error)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct closure *cl = bio->bi_private;
@@ -678,6 +663,120 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 	bch_bbio_endio(s->op.c, bio, error, "reading from cache");
 }
 
+static int submit_partial_cache_miss(struct btree *b, struct search *s,
+				     struct bkey *k)
+{
+	struct bio *bio = &s->bio.bio;
+	int ret = MAP_CONTINUE;
+
+	do {
+		unsigned sectors = INT_MAX;
+
+		if (KEY_INODE(k) == s->op.inode) {
+			if (KEY_START(k) <= bio->bi_sector)
+				break;
+
+			sectors = min_t(uint64_t, sectors,
+					KEY_START(k) - bio->bi_sector);
+		}
+
+		ret = s->d->cache_miss(b, s, bio, sectors);
+	} while (ret == MAP_CONTINUE);
+
+	return ret;
+}
+
+/*
+ * Read from a single key, handling the initial cache miss if the key starts in
+ * the middle of the bio
+ */
+static int submit_partial_cache_hit(struct btree_op *op, struct btree *b,
+				    struct bkey *k)
+{
+	struct search *s = container_of(op, struct search, op);
+	struct bio *bio = &s->bio.bio;
+	unsigned ptr;
+	struct bio *n;
+
+	int ret = submit_partial_cache_miss(b, s, k);
+	if (ret != MAP_CONTINUE || !KEY_SIZE(k))
+		return ret;
+
+	/* XXX: figure out best pointer - for multiple cache devices */
+	ptr = 0;
+
+	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
+
+	while (ret == MAP_CONTINUE &&
+	       KEY_INODE(k) == op->inode &&
+	       bio->bi_sector < KEY_OFFSET(k)) {
+		struct bkey *bio_key;
+		sector_t sector = PTR_OFFSET(k, ptr) +
+			(bio->bi_sector - KEY_START(k));
+		unsigned sectors = min_t(uint64_t, INT_MAX,
+					 KEY_OFFSET(k) - bio->bi_sector);
+
+		n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+		if (n == bio)
+			ret = MAP_DONE;
+
+		bio_key = &container_of(n, struct bbio, bio)->key;
+
+		/*
+		 * The bucket we're reading from might be reused while our bio
+		 * is in flight, and we could then end up reading the wrong
+		 * data.
+		 *
+		 * We guard against this by checking (in cache_read_endio()) if
+		 * the pointer is stale again; if so, we treat it as an error
+		 * and reread from the backing device (but we don't pass that
+		 * error up anywhere).
+		 */
+
+		bch_bkey_copy_single_ptr(bio_key, k, ptr);
+		SET_PTR_OFFSET(bio_key, 0, sector);
+
+		n->bi_end_io	= bch_cache_read_endio;
+		n->bi_private	= &s->cl;
+
+		__bch_submit_bbio(n, b->c);
+	}
+
+	return ret;
+}
+
+static void cache_lookup(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+	struct search *s = container_of(op, struct search, op);
+	struct bio *bio = &s->bio.bio;
+
+	int ret = bch_btree_map_keys(op, op->c,
+				     &KEY(op->inode, bio->bi_sector, 0),
+				     submit_partial_cache_hit, 1);
+	if (ret == -EAGAIN)
+		continue_at(cl, cache_lookup, bcache_wq);
+
+	closure_return(cl);
+}
+
+/* Common code for the make_request functions */
+
+static void request_endio(struct bio *bio, int error)
+{
+	struct closure *cl = bio->bi_private;
+
+	if (error) {
+		struct search *s = container_of(cl, struct search, cl);
+		s->error = error;
+		/* Only cache read errors are recoverable */
+		s->recoverable = false;
+	}
+
+	bio_put(bio);
+	closure_put(cl);
+}
+
 static void bio_complete(struct search *s)
 {
 	if (s->orig_bio) {
@@ -1005,15 +1104,14 @@ static void cached_dev_read_done_bh(struct closure *cl)
 static int cached_dev_cache_miss(struct btree *b, struct search *s,
 				 struct bio *bio, unsigned sectors)
 {
-	int ret = 0;
+	int ret = MAP_CONTINUE;
 	unsigned reada = 0;
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
 
 	if (s->cache_miss || s->op.bypass) {
 		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-		if (miss == bio)
-			s->op.lookup_done = true;
+		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
 		goto out_submit;
 	}
@@ -1033,11 +1131,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		return ret;
 
 	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-	if (miss == bio)
-		s->op.lookup_done = true;
-	else
-		/* btree_search_recurse()'s btree iterator is no good anymore */
-		ret = -EINTR;
+	/* btree_search_recurse()'s btree iterator is no good anymore */
+	ret = miss == bio ? MAP_DONE : -EINTR;
 
 	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
 			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
@@ -1075,7 +1171,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 
-	closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
+	closure_call(&s->op.cl, cache_lookup, NULL, cl);
 	continue_at(cl, cached_dev_read_done_bh, NULL);
 }
@@ -1287,9 +1383,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
 	bio_advance(bio, min(sectors << 9, bio->bi_size));
 
 	if (!bio->bi_size)
-		s->op.lookup_done = true;
+		return MAP_DONE;
 
-	return 0;
+	return MAP_CONTINUE;
 }
 
 static void flash_dev_nodata(struct closure *cl)
@@ -1339,7 +1435,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		closure_call(&s->op.cl, bch_data_insert, NULL, cl);
 	} else {
-		closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
+		closure_call(&s->op.cl, cache_lookup, NULL, cl);
 	}
 
 	continue_at(cl, search_free, NULL);
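
The cache_miss conversions above all follow the same pattern: instead of setting s->op.lookup_done, the callback reports through its return value whether the bio was fully consumed (the real code additionally returns -EINTR when the btree iterator must be restarted). A toy sketch of that convention, with a hypothetical toy_bio type standing in for struct bio and illustrative return values:

#include <stdio.h>

#define MAP_DONE	0	/* whole bio consumed: stop the walk */
#define MAP_CONTINUE	1	/* bio only partially consumed: keep going */

struct toy_bio { unsigned remaining; };	/* sectors left to read */

/*
 * Toy cache-miss handler: consume up to 'sectors' from the bio and
 * report the outcome through the return value instead of setting a
 * lookup_done flag on shared state.
 */
static int toy_cache_miss(struct toy_bio *bio, unsigned sectors)
{
	unsigned take = sectors < bio->remaining ? sectors : bio->remaining;

	bio->remaining -= take;	/* stands in for bch_bio_split() */

	return bio->remaining ? MAP_CONTINUE : MAP_DONE;
}

int main(void)
{
	struct toy_bio bio = { 24 };

	/* Two 16-sector misses consume a 24-sector bio. */
	printf("%d\n", toy_cache_miss(&bio, 16));	/* 1 = MAP_CONTINUE */
	printf("%d\n", toy_cache_miss(&bio, 16));	/* 0 = MAP_DONE */
	return 0;
}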

--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h

@@ -31,10 +31,8 @@ struct search {
 	struct keylist		insert_keys;
 };
 
-void bch_cache_read_endio(struct bio *, int);
 unsigned bch_get_congested(struct cache_set *);
 void bch_data_insert(struct closure *cl);
-void bch_cache_read_endio(struct bio *, int);
 
 void bch_open_buckets_free(struct cache_set *);
 int bch_open_buckets_alloc(struct cache_set *);