bcache: Debug code improvements
Couple changes:
 * Consolidate bch_check_keys() and bch_check_key_order(), and move the
   checks that only check_key_order() could do to bch_btree_iter_next().

 * Get rid of CONFIG_BCACHE_EDEBUG - now, all that code is compiled in
   when CONFIG_BCACHE_DEBUG is enabled, and there's now a sysfs file to
   flip on the EDEBUG checks at runtime.

 * Dropped an old not terribly useful check in rw_unlock(), and
   refactored/improved some of the other debug code.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
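A minimal sketch of the pattern this enables (illustrative only, not part of this patch): the expensive checks stay compiled in under CONFIG_BCACHE_DEBUG and are gated by a runtime flag that the new sysfs file flips; the expensive_debug_checks field name below is an assumption.

#ifdef CONFIG_BCACHE_DEBUG
void __bch_check_keys(struct btree *b, const char *fmt, ...);

/* Wrapper: only do the expensive walk when the runtime toggle is set. */
#define bch_check_keys(b, ...)						\
do {									\
	if ((b)->c->expensive_debug_checks)	/* assumed sysfs-backed flag */	\
		__bch_check_keys(b, __VA_ARGS__);			\
} while (0)
#else
#define bch_check_keys(b, ...)	do {} while (0)
#endif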
@@ -76,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
 	return out - buf;
 }
 
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
-{
-	return scnprintf(buf, size, "%zu level %i/%i",
-			 PTR_BUCKET_NR(b->c, &b->key, 0),
-			 b->level, b->c->root ? b->c->root->level : -1);
-}
-
-#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
-
-static bool skipped_backwards(struct btree *b, struct bkey *k)
-{
-	return bkey_cmp(k, (!b->level)
-			? &START_KEY(bkey_next(k))
-			: bkey_next(k)) > 0;
-}
+#ifdef CONFIG_BCACHE_DEBUG
 
 static void dump_bset(struct btree *b, struct bset *i)
 {
-	struct bkey *k;
+	struct bkey *k, *next;
 	unsigned j;
 	char buf[80];
 
-	for (k = i->start; k < end(i); k = bkey_next(k)) {
+	for (k = i->start; k < end(i); k = next) {
+		next = bkey_next(k);
+
 		bch_bkey_to_text(buf, sizeof(buf), k);
 		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
 		       (uint64_t *) k - i->d, i->keys, buf);
@@ -114,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i)
 
 		printk(" %s\n", bch_ptr_status(b->c, k));
 
-		if (bkey_next(k) < end(i) &&
-		    skipped_backwards(b, k))
+		if (next < end(i) &&
+		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
 			printk(KERN_ERR "Key skipped backwards\n");
 	}
 }
 
-#endif
+static void bch_dump_bucket(struct btree *b)
+{
+	unsigned i;
 
-#ifdef CONFIG_BCACHE_DEBUG
+	console_lock();
+	for (i = 0; i <= b->nsets; i++)
+		dump_bset(b, b->sets[i].data);
+	console_unlock();
+}
 
 void bch_btree_verify(struct btree *b, struct bset *new)
 {
@@ -211,11 +205,7 @@ out_put:
 	bio_put(check);
 }
 
-#endif
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *b)
+int __bch_count_data(struct btree *b)
 {
 	unsigned ret = 0;
 	struct btree_iter iter;
@@ -227,72 +217,60 @@ unsigned bch_count_data(struct btree *b)
 	return ret;
 }
 
-static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
-				   va_list args)
-{
-	unsigned i;
-	char buf[80];
-
-	console_lock();
-
-	for (i = 0; i <= b->nsets; i++)
-		dump_bset(b, b->sets[i].data);
-
-	vprintk(fmt, args);
-
-	console_unlock();
-
-	bch_btree_to_text(buf, sizeof(buf), b);
-	panic("at %s\n", buf);
-}
-
-void bch_check_key_order_msg(struct btree *b, struct bset *i,
-			     const char *fmt, ...)
-{
-	struct bkey *k;
-
-	if (!i->keys)
-		return;
-
-	for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
-		if (skipped_backwards(b, k)) {
-			va_list args;
-			va_start(args, fmt);
-
-			vdump_bucket_and_panic(b, fmt, args);
-			va_end(args);
-		}
-}
-
-void bch_check_keys(struct btree *b, const char *fmt, ...)
+void __bch_check_keys(struct btree *b, const char *fmt, ...)
 {
 	va_list args;
 	struct bkey *k, *p = NULL;
 	struct btree_iter iter;
-
-	if (b->level)
-		return;
+	const char *err;
 
 	for_each_key(b, k, &iter) {
-		if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
-			printk(KERN_ERR "Keys out of order:\n");
-			goto bug;
-		}
+		if (!b->level) {
+			err = "Keys out of order";
+			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+				goto bug;
 
-		if (bch_ptr_invalid(b, k))
-			continue;
+			if (bch_ptr_invalid(b, k))
+				continue;
 
-		if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
-			printk(KERN_ERR "Overlapping keys:\n");
-			goto bug;
+			err = "Overlapping keys";
+			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+				goto bug;
+		} else {
+			if (bch_ptr_bad(b, k))
+				continue;
+
+			err = "Duplicate keys";
+			if (p && !bkey_cmp(p, k))
+				goto bug;
 		}
 		p = k;
 	}
+
+	err = "Key larger than btree node key";
+	if (p && bkey_cmp(p, &b->key) > 0)
+		goto bug;
+
 	return;
 bug:
+	bch_dump_bucket(b);
+
 	va_start(args, fmt);
-	vdump_bucket_and_panic(b, fmt, args);
+	vprintk(fmt, args);
 	va_end(args);
+
+	panic("bcache error: %s:\n", err);
 }
+
+void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+	struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+	if (next < iter->data->end &&
+	    bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
+		bch_dump_bucket(iter->b);
+		panic("Key skipped backwards\n");
+	}
+}
 
 #endif
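Per the commit message, the ordering check that bch_check_key_order() used to do now runs as the iterator advances. A plausible header hookup for the new bch_btree_iter_next_check() (a sketch assuming the usual compile-away stub pattern; not shown in this diff):

#ifdef CONFIG_BCACHE_DEBUG
void bch_btree_iter_next_check(struct btree_iter *iter);
#else
static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
#endif

With the stub in place, bch_btree_iter_next() can call the check unconditionally; it compiles away entirely in non-debug builds.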