bcache: style fix to replace 'unsigned' by 'unsigned int'
This patch fixes warnings reported by checkpatch.pl by replacing bare 'unsigned' with 'unsigned int'.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Shenghui Wang <shhuiw@foxmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
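For context, checkpatch.pl flags the bare spelling with a warning along the lines of "Prefer 'unsigned int' to bare use of 'unsigned'". A minimal sketch of the pattern being fixed throughout the diff below (the identifiers here are illustrative, not taken from the patch):

	/* flagged by checkpatch.pl: bare 'unsigned' */
	unsigned count;
	static unsigned order_of(unsigned size);

	/* preferred spelling; same type, so no behavior change */
	unsigned int count;
	static unsigned int order_of(unsigned int size);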
@@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b)
 
 void bkey_put(struct cache_set *c, struct bkey *k)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < KEY_PTRS(k); i++)
 		if (ptr_available(c, k, i))
@@ -479,7 +479,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
 
 void bch_btree_node_write(struct btree *b, struct closure *parent)
 {
-	unsigned nsets = b->keys.nsets;
+	unsigned int nsets = b->keys.nsets;
 
 	lockdep_assert_held(&b->lock);
 
@@ -581,7 +581,7 @@ static void mca_bucket_free(struct btree *b)
 	list_move(&b->list, &b->c->btree_cache_freeable);
 }
 
-static unsigned btree_order(struct bkey *k)
+static unsigned int btree_order(struct bkey *k)
 {
 	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
 }
@@ -589,7 +589,7 @@ static unsigned btree_order(struct bkey *k)
 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 {
 	if (!bch_btree_keys_alloc(&b->keys,
-				  max_t(unsigned,
+				  max_t(unsigned int,
 					ilog2(b->c->btree_pages),
 					btree_order(k)),
 				  gfp)) {
@@ -620,7 +620,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
 	return b;
 }
 
-static int mca_reap(struct btree *b, unsigned min_order, bool flush)
+static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
 {
 	struct closure cl;
 
@@ -786,7 +786,7 @@ void bch_btree_cache_free(struct cache_set *c)
 
 int bch_btree_cache_alloc(struct cache_set *c)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < mca_reserve(c); i++)
 		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
@@ -1136,7 +1136,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
 
 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
 {
-	unsigned i;
+	unsigned int i;
 
 	mutex_lock(&b->c->bucket_lock);
 
@@ -1157,7 +1157,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
 {
 	struct cache_set *c = b->c;
 	struct cache *ca;
-	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
+	unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
 
 	mutex_lock(&c->bucket_lock);
 
@@ -1181,7 +1181,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
 			       struct bkey *k)
 {
 	uint8_t stale = 0;
-	unsigned i;
+	unsigned int i;
 	struct bucket *g;
 
 	/*
@@ -1219,7 +1219,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
 		SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
 
 	/* guard against overflow */
-	SET_GC_SECTORS_USED(g, min_t(unsigned,
+	SET_GC_SECTORS_USED(g, min_t(unsigned int,
 				     GC_SECTORS_USED(g) + KEY_SIZE(k),
 				     MAX_GC_SECTORS_USED));
 
@@ -1233,7 +1233,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
 
 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < KEY_PTRS(k); i++)
 		if (ptr_available(c, k, i) &&
@@ -1259,7 +1259,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 {
 	uint8_t stale = 0;
-	unsigned keys = 0, good_keys = 0;
+	unsigned int keys = 0, good_keys = 0;
 	struct bkey *k;
 	struct btree_iter iter;
 	struct bset_tree *t;
@@ -1302,7 +1302,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 
 struct gc_merge_info {
 	struct btree	*b;
-	unsigned	keys;
+	unsigned int	keys;
 };
 
 static int bch_btree_insert_node(struct btree *, struct btree_op *,
@@ -1311,7 +1311,7 @@ static int bch_btree_insert_node(struct btree *, struct btree_op *,
 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 			     struct gc_stat *gc, struct gc_merge_info *r)
 {
-	unsigned i, nodes = 0, keys = 0, blocks;
+	unsigned int i, nodes = 0, keys = 0, blocks;
 	struct btree *new_nodes[GC_MERGE_NODES];
 	struct keylist keylist;
 	struct closure cl;
@@ -1511,11 +1511,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
 	return -EINTR;
 }
 
-static unsigned btree_gc_count_keys(struct btree *b)
+static unsigned int btree_gc_count_keys(struct btree *b)
 {
 	struct bkey *k;
 	struct btree_iter iter;
-	unsigned ret = 0;
+	unsigned int ret = 0;
 
 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
 		ret += bkey_u64s(k);
@@ -1678,7 +1678,7 @@ static void btree_gc_start(struct cache_set *c)
 {
 	struct cache *ca;
 	struct bucket *b;
-	unsigned i;
+	unsigned int i;
 
 	if (!c->gc_mark_valid)
 		return;
@@ -1704,7 +1704,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
 {
 	struct bucket *b;
 	struct cache *ca;
-	unsigned i;
+	unsigned int i;
 
 	mutex_lock(&c->bucket_lock);
 
@@ -1722,7 +1722,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
 		struct bcache_device *d = c->devices[i];
 		struct cached_dev *dc;
 		struct keybuf_key *w, *n;
-		unsigned j;
+		unsigned int j;
 
 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
 			continue;
@@ -1814,7 +1814,7 @@ static void bch_btree_gc(struct cache_set *c)
 static bool gc_should_run(struct cache_set *c)
 {
 	struct cache *ca;
-	unsigned i;
+	unsigned int i;
 
 	for_each_cache(ca, c, i)
 		if (ca->invalidate_needs_gc)
@@ -1905,7 +1905,7 @@ void bch_initial_gc_finish(struct cache_set *c)
 {
 	struct cache *ca;
 	struct bucket *b;
-	unsigned i;
+	unsigned int i;
 
 	bch_btree_gc_finish(c);
 
@@ -1945,7 +1945,7 @@ void bch_initial_gc_finish(struct cache_set *c)
 static bool btree_insert_key(struct btree *b, struct bkey *k,
 			     struct bkey *replace_key)
 {
-	unsigned status;
+	unsigned int status;
 
 	BUG_ON(bkey_cmp(k, &b->key) > 0);
 
@@ -2044,7 +2044,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		       block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
 
 	if (split) {
-		unsigned keys = 0;
+		unsigned int keys = 0;
 
 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
 
@@ -2300,7 +2300,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
 
 void bch_btree_set_root(struct btree *b)
 {
-	unsigned i;
+	unsigned int i;
 	struct closure cl;
 
 	closure_init_stack(&cl);
@@ -2412,7 +2412,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
 
 struct refill {
 	struct btree_op	op;
-	unsigned	nr_found;
+	unsigned int	nr_found;
 	struct keybuf	*buf;
 	struct bkey	*end;
 	keybuf_pred_fn	*pred;
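The two spellings name the identical C type, so every hunk above is purely cosmetic. As a sanity check, a standalone userspace sketch (not part of the patch; it relies on GCC's __builtin_types_compatible_p extension):

	#include <stdio.h>

	int main(void)
	{
		/* prints 1: 'unsigned' and 'unsigned int' are the same type */
		printf("%d\n", __builtin_types_compatible_p(unsigned, unsigned int));
		return 0;
	}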