btrfs: migrate the block group lookup code
Move these bits first as they are the easiest to move. Export two of
the helpers so they can be moved all at once.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor style updates ]
Signed-off-by: David Sterba <dsterba@suse.com>
commit 2e405ad842
parent aac0023c21
committed by David Sterba
fs/btrfs/block-group.c | 95 (new file)
@@ -0,0 +1,95 @@
// SPDX-License-Identifier: GPL-2.0

#include "ctree.h"
#include "block-group.h"

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

struct btrfs_block_group_cache *btrfs_next_block_group(
		struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->key.objectid + cache->key.offset;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
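
For context, a minimal sketch of how the two exported lookups compose:
btrfs_lookup_first_block_group() returns a referenced block group at or
after the given bytenr, and btrfs_next_block_group() drops that reference
while taking one on the following group, so callers can walk the whole
tree. walk_all_block_groups() below is a hypothetical name used only for
illustration; it is not part of this commit.

/*
 * Hypothetical illustration only -- not part of this commit.
 * Walks every block group in the filesystem using the helpers above.
 * Each iteration holds exactly one block group reference, which
 * btrfs_next_block_group() releases before returning the next group.
 */
static void walk_all_block_groups(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *cache;

	for (cache = btrfs_lookup_first_block_group(fs_info, 0);
	     cache != NULL;
	     cache = btrfs_next_block_group(cache)) {
		/* inspect cache->key.objectid / cache->key.offset here */
	}
}

This walk is the pattern the RB_EMPTY_NODE check in btrfs_next_block_group()
protects: if the current group is removed mid-iteration, the walk restarts
from the next bytenr instead of following a stale rb-tree node.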