f2fs: use rb_*_cached friends

As rbtree supports caching the leftmost node natively, update the f2fs code
to use the rb_*_cached helpers to speed up visiting the leftmost node.
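
For context, the rb_*_cached API keeps a pointer to the leftmost node inside
struct rb_root_cached, so rb_first_cached() is O(1) instead of walking down
the left spine. Below is a minimal, hypothetical sketch of that pattern (the
sample_node type and helpers are illustrative only, not f2fs code):

	#include <linux/rbtree.h>

	struct sample_node {
		struct rb_node rb_node;
		unsigned int ofs;
	};

	static struct rb_root_cached sample_root = RB_ROOT_CACHED;

	/* Insert while tracking whether the new node becomes the leftmost. */
	static void sample_insert(struct sample_node *new)
	{
		struct rb_node **p = &sample_root.rb_root.rb_node;
		struct rb_node *parent = NULL;
		bool leftmost = true;

		while (*p) {
			struct sample_node *cur = rb_entry(*p,
						struct sample_node, rb_node);

			parent = *p;
			if (new->ofs < cur->ofs) {
				p = &(*p)->rb_left;
			} else {
				p = &(*p)->rb_right;
				/* went right at least once: not leftmost */
				leftmost = false;
			}
		}

		rb_link_node(&new->rb_node, parent, p);
		rb_insert_color_cached(&new->rb_node, &sample_root, leftmost);
	}

	/* O(1) access to the smallest key. */
	static struct sample_node *sample_first(void)
	{
		struct rb_node *node = rb_first_cached(&sample_root);

		return node ? rb_entry(node, struct sample_node, rb_node) : NULL;
	}

	static void sample_remove(struct sample_node *victim)
	{
		rb_erase_cached(&victim->rb_node, &sample_root);
	}

The diff below applies the same pattern to the f2fs extent cache: the extent
tree root becomes a struct rb_root_cached, insertions pass a "leftmost" hint
collected during the descent, and the rb_first()/rb_insert_color()/rb_erase()
callers switch to their _cached variants.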

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Author:    Chao Yu
Date:      2018-10-04 11:18:30 +08:00
Committer: Jaegeuk Kim
Commit:    4dada3fd70
Parent:    ef2a007134

3 changed files with 69 additions and 48 deletions

fs/f2fs/extent_cache.c

@@ -27,10 +27,10 @@ static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
 	return NULL;
 }
 
-static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
+static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
 							unsigned int ofs)
 {
-	struct rb_node *node = root->rb_node;
+	struct rb_node *node = root->rb_root.rb_node;
 	struct rb_entry *re;
 
 	while (node) {
@@ -46,7 +46,7 @@ static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
 	return NULL;
 }
 
-struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
 				struct rb_entry *cached_re, unsigned int ofs)
 {
 	struct rb_entry *re;
@@ -59,22 +59,25 @@ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root *root,
 }
 
 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
-				struct rb_root *root, struct rb_node **parent,
-				unsigned int ofs)
+				struct rb_root_cached *root,
+				struct rb_node **parent,
+				unsigned int ofs, bool *leftmost)
 {
-	struct rb_node **p = &root->rb_node;
+	struct rb_node **p = &root->rb_root.rb_node;
 	struct rb_entry *re;
 
 	while (*p) {
 		*parent = *p;
 		re = rb_entry(*parent, struct rb_entry, rb_node);
 
-		if (ofs < re->ofs)
+		if (ofs < re->ofs) {
 			p = &(*p)->rb_left;
-		else if (ofs >= re->ofs + re->len)
+		} else if (ofs >= re->ofs + re->len) {
 			p = &(*p)->rb_right;
-		else
+			*leftmost = false;
+		} else {
 			f2fs_bug_on(sbi, 1);
+		}
 	}
 
 	return p;
@@ -89,16 +92,16 @@ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
  * in order to simpfy the insertion after.
  * tree must stay unchanged between lookup and insertion.
  */
-struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
+struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
 				struct rb_entry *cached_re,
 				unsigned int ofs,
 				struct rb_entry **prev_entry,
 				struct rb_entry **next_entry,
 				struct rb_node ***insert_p,
 				struct rb_node **insert_parent,
-				bool force)
+				bool force, bool *leftmost)
 {
-	struct rb_node **pnode = &root->rb_node;
+	struct rb_node **pnode = &root->rb_root.rb_node;
 	struct rb_node *parent = NULL, *tmp_node;
 	struct rb_entry *re = cached_re;
@@ -107,7 +110,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
 	*prev_entry = NULL;
 	*next_entry = NULL;
 
-	if (RB_EMPTY_ROOT(root))
+	if (RB_EMPTY_ROOT(&root->rb_root))
 		return NULL;
 
 	if (re) {
@@ -115,16 +118,22 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root *root,
 			goto lookup_neighbors;
 	}
 
+	if (leftmost)
+		*leftmost = true;
+
 	while (*pnode) {
 		parent = *pnode;
 		re = rb_entry(*pnode, struct rb_entry, rb_node);
 
-		if (ofs < re->ofs)
+		if (ofs < re->ofs) {
 			pnode = &(*pnode)->rb_left;
-		else if (ofs >= re->ofs + re->len)
+		} else if (ofs >= re->ofs + re->len) {
 			pnode = &(*pnode)->rb_right;
-		else
+			if (leftmost)
+				*leftmost = false;
+		} else {
 			goto lookup_neighbors;
+		}
 	}
 
 	*insert_p = pnode;
@@ -157,10 +166,10 @@ lookup_neighbors:
 }
 
 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
-						struct rb_root *root)
+						struct rb_root_cached *root)
 {
 #ifdef CONFIG_F2FS_CHECK_FS
-	struct rb_node *cur = rb_first(root), *next;
+	struct rb_node *cur = rb_first_cached(root), *next;
 	struct rb_entry *cur_re, *next_re;
 
 	if (!cur)
@@ -193,7 +202,8 @@ static struct kmem_cache *extent_node_slab;
 static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
-				struct rb_node *parent, struct rb_node **p)
+				struct rb_node *parent, struct rb_node **p,
+				bool leftmost)
 {
 	struct extent_node *en;
@@ -206,7 +216,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 	en->et = et;
 
 	rb_link_node(&en->rb_node, parent, p);
-	rb_insert_color(&en->rb_node, &et->root);
+	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
 	atomic_inc(&et->node_cnt);
 	atomic_inc(&sbi->total_ext_node);
 	return en;
@@ -215,7 +225,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 static void __detach_extent_node(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_node *en)
 {
-	rb_erase(&en->rb_node, &et->root);
+	rb_erase_cached(&en->rb_node, &et->root);
 	atomic_dec(&et->node_cnt);
 	atomic_dec(&sbi->total_ext_node);
@@ -254,7 +264,7 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
 		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
 		memset(et, 0, sizeof(struct extent_tree));
 		et->ino = ino;
-		et->root = RB_ROOT;
+		et->root = RB_ROOT_CACHED;
 		et->cached_en = NULL;
 		rwlock_init(&et->lock);
 		INIT_LIST_HEAD(&et->list);
@@ -275,10 +285,10 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
 static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei)
 {
-	struct rb_node **p = &et->root.rb_node;
+	struct rb_node **p = &et->root.rb_root.rb_node;
 	struct extent_node *en;
 
-	en = __attach_extent_node(sbi, et, ei, NULL, p);
+	en = __attach_extent_node(sbi, et, ei, NULL, p, true);
 	if (!en)
 		return NULL;
@@ -294,7 +304,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
 	struct extent_node *en;
 	unsigned int count = atomic_read(&et->node_cnt);
 
-	node = rb_first(&et->root);
+	node = rb_first_cached(&et->root);
 	while (node) {
 		next = rb_next(node);
 		en = rb_entry(node, struct extent_node, rb_node);
@@ -452,7 +462,8 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
 static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
 				struct extent_tree *et, struct extent_info *ei,
 				struct rb_node **insert_p,
-				struct rb_node *insert_parent)
+				struct rb_node *insert_parent,
+				bool leftmost)
 {
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
@@ -464,9 +475,12 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
 		goto do_insert;
 	}
 
-	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
+	leftmost = true;
+
+	p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
+						ei->fofs, &leftmost);
 do_insert:
-	en = __attach_extent_node(sbi, et, ei, parent, p);
+	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
 	if (!en)
 		return NULL;
@@ -492,6 +506,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 	unsigned int end = fofs + len;
 	unsigned int pos = (unsigned int)fofs;
 	bool updated = false;
+	bool leftmost;
 
 	if (!et)
 		return;
@@ -519,7 +534,8 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 				(struct rb_entry *)et->cached_en, fofs,
 				(struct rb_entry **)&prev_en,
 				(struct rb_entry **)&next_en,
-				&insert_p, &insert_parent, false);
+				&insert_p, &insert_parent, false,
+				&leftmost);
 	if (!en)
 		en = next_en;
@@ -546,7 +562,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 						end - dei.fofs + dei.blk,
 						org_end - end);
 				en1 = __insert_extent_tree(sbi, et, &ei,
-							NULL, NULL);
+							NULL, NULL, true);
 				next_en = en1;
 			} else {
 				en->ei.fofs = end;
@@ -587,7 +603,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 		set_extent_info(&ei, fofs, blkaddr, len);
 		if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
 			__insert_extent_tree(sbi, et, &ei,
-						insert_p, insert_parent);
+					insert_p, insert_parent, leftmost);
 
 		/* give up extent_cache, if split and small updates happen */
 		if (dei.len >= 1 &&