bcache: Convert bch_btree_read_async() to bch_btree_map_keys()
This is a fairly straightforward conversion, mostly reshuffling -
op->lookup_done goes away, replaced by MAP_DONE/MAP_CONTINUE. And the
code for handling cache hits and misses wasn't really btree code, so it
gets moved to request.c.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
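For orientation, here is a rough sketch of the shape the lookup takes once it is expressed as a bch_btree_map_keys() callback. This is not the code the commit adds to request.c: the function name is invented, the callback signature and the return convention of cache_miss() are assumed from the description above, and the hit path is left as a comment. What it illustrates is that returning MAP_CONTINUE or MAP_DONE from the per-key callback replaces the op->lookup_done flag that the removed code below keeps checking.

/*
 * Illustrative sketch only, not the request.c code added by this commit.
 * Assumes the usual request.c includes ("bcache.h", "btree.h", "request.h").
 * bch_btree_map_keys() walks keys starting from a search key and calls a
 * function like this once per key: MAP_CONTINUE asks for the next key,
 * MAP_DONE ends the walk.
 */
static int example_lookup_fn(struct btree_op *op, struct btree *b,
                             struct bkey *k)
{
        struct search *s = container_of(op, struct search, op);
        struct bio *bio = &s->bio.bio;

        /*
         * Hole before this key (or a key from another inode): read the gap
         * from the backing device, as submit_partial_cache_miss() used to.
         * Here cache_miss() is assumed to return MAP_DONE/MAP_CONTINUE
         * itself instead of setting op->lookup_done.
         */
        if (KEY_INODE(k) != op->inode || KEY_START(k) > bio->bi_sector) {
                unsigned sectors = KEY_INODE(k) == op->inode
                        ? min_t(uint64_t, INT_MAX,
                                KEY_START(k) - bio->bi_sector)
                        : INT_MAX;
                int ret = s->d->cache_miss(b, s, bio, sectors);

                if (ret != MAP_CONTINUE)
                        return ret;
        }

        /*
         * Cache-hit handling (split the bio, point the split at the cache
         * device and submit it) would go here, much as in the removed
         * submit_partial_cache_hit() below; it returns MAP_DONE once the
         * whole bio has been submitted.
         */
        return MAP_CONTINUE;
}

The -EAGAIN/continue_at() requeue in bch_btree_search_async() survives the conversion largely unchanged on the request.c side; see the caller sketch after the diff.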
@@ -23,7 +23,6 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 #include "writeback.h"
 
 #include <linux/slab.h>
@@ -2255,138 +2254,6 @@ void bch_btree_set_root(struct btree *b)
         closure_sync(&cl);
 }
 
-/* Cache lookup */
-
-static int submit_partial_cache_miss(struct btree *b, struct btree_op *op,
-                                     struct bkey *k)
-{
-        struct search *s = container_of(op, struct search, op);
-        struct bio *bio = &s->bio.bio;
-        int ret = 0;
-
-        while (!ret &&
-               !op->lookup_done) {
-                unsigned sectors = INT_MAX;
-
-                if (KEY_INODE(k) == op->inode) {
-                        if (KEY_START(k) <= bio->bi_sector)
-                                break;
-
-                        sectors = min_t(uint64_t, sectors,
-                                        KEY_START(k) - bio->bi_sector);
-                }
-
-                ret = s->d->cache_miss(b, s, bio, sectors);
-        }
-
-        return ret;
-}
-
-/*
- * Read from a single key, handling the initial cache miss if the key starts in
- * the middle of the bio
- */
-static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
-                                    struct bkey *k)
-{
-        struct search *s = container_of(op, struct search, op);
-        struct bio *bio = &s->bio.bio;
-        unsigned ptr;
-        struct bio *n;
-
-        int ret = submit_partial_cache_miss(b, op, k);
-        if (ret || op->lookup_done)
-                return ret;
-
-        /* XXX: figure out best pointer - for multiple cache devices */
-        ptr = 0;
-
-        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
-
-        while (!op->lookup_done &&
-               KEY_INODE(k) == op->inode &&
-               bio->bi_sector < KEY_OFFSET(k)) {
-                struct bkey *bio_key;
-                sector_t sector = PTR_OFFSET(k, ptr) +
-                        (bio->bi_sector - KEY_START(k));
-                unsigned sectors = min_t(uint64_t, INT_MAX,
-                                         KEY_OFFSET(k) - bio->bi_sector);
-
-                n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
-                if (n == bio)
-                        op->lookup_done = true;
-
-                bio_key = &container_of(n, struct bbio, bio)->key;
-
-                /*
-                 * The bucket we're reading from might be reused while our bio
-                 * is in flight, and we could then end up reading the wrong
-                 * data.
-                 *
-                 * We guard against this by checking (in cache_read_endio()) if
-                 * the pointer is stale again; if so, we treat it as an error
-                 * and reread from the backing device (but we don't pass that
-                 * error up anywhere).
-                 */
-
-                bch_bkey_copy_single_ptr(bio_key, k, ptr);
-                SET_PTR_OFFSET(bio_key, 0, sector);
-
-                n->bi_end_io = bch_cache_read_endio;
-                n->bi_private = &s->cl;
-
-                __bch_submit_bbio(n, b->c);
-        }
-
-        return 0;
-}
-
-static int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
-{
-        struct search *s = container_of(op, struct search, op);
-        struct bio *bio = &s->bio.bio;
-
-        int ret = 0;
-        struct bkey *k;
-        struct btree_iter iter;
-        bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
-
-        do {
-                k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
-                if (!k) {
-                        /*
-                         * b->key would be exactly what we want, except that
-                         * pointers to btree nodes have nonzero size - we
-                         * wouldn't go far enough
-                         */
-
-                        ret = submit_partial_cache_miss(b, op,
-                                        &KEY(KEY_INODE(&b->key),
-                                             KEY_OFFSET(&b->key), 0));
-                        break;
-                }
-
-                ret = b->level
-                        ? btree(search_recurse, k, b, op)
-                        : submit_partial_cache_hit(b, op, k);
-        } while (!ret &&
-                 !op->lookup_done);
-
-        return ret;
-}
-
-void bch_btree_search_async(struct closure *cl)
-{
-        struct btree_op *op = container_of(cl, struct btree_op, cl);
-
-        int ret = btree_root(search_recurse, op->c, op);
-
-        if (ret == -EAGAIN)
-                continue_at(cl, bch_btree_search_async, bcache_wq);
-
-        closure_return(cl);
-}
-
 /* Map across nodes or keys */
 
 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
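As noted under the commit message, the async entry point keeps its closure-based requeue pattern after the conversion. Below is a sketch of that caller side, reusing the illustrative example_lookup_fn() from above; the function name, the exact bch_btree_map_keys() signature, and the MAP_END_KEY flag are assumptions rather than quotes from btree.h.

/*
 * Sketch only: the post-conversion counterpart of the removed
 * bch_btree_search_async().  If the btree walk wants to block it returns
 * -EAGAIN and the closure is requeued on bcache_wq, exactly as before.
 */
static void example_cache_lookup(struct closure *cl)
{
        struct btree_op *op = container_of(cl, struct btree_op, cl);
        struct search *s = container_of(op, struct search, op);
        struct bio *bio = &s->bio.bio;
        int ret;

        /* Walk keys starting at the first sector the request covers */
        ret = bch_btree_map_keys(op, op->c,
                                 &KEY(op->inode, bio->bi_sector, 0),
                                 example_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN)
                continue_at(cl, example_cache_lookup, bcache_wq);

        closure_return(cl);
}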
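One more note on the race described in the comment inside submit_partial_cache_hit(): the guard lives in the read completion path in request.c (the removed code sets n->bi_end_io to bch_cache_read_endio). A rough sketch of the kind of check that handler performs; the field names and error plumbing here are approximate, not quoted from request.c.

/*
 * Sketch of the staleness check referred to above.  The bkey stored in the
 * containing struct bbio is rechecked on completion: if the bucket was
 * reused while the read was in flight, the result is treated as an error
 * so the data gets reread from the backing device.
 */
static void example_cache_read_endio(struct bio *bio, int error)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct closure *cl = bio->bi_private;
        struct search *s = container_of(cl, struct search, cl);

        if (error)
                s->error = error;
        else if (ptr_stale(s->op.c, &b->key, 0))
                /* Stale pointer: flag it without blaming the cache device */
                s->error = -EINTR;

        bch_bbio_endio(s->op.c, bio, error, "reading from cache");
}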