Merge branch 'xarray' of git://git.infradead.org/users/willy/linux-dax
Pull XArray conversion from Matthew Wilcox:
 "The XArray provides an improved interface to the radix tree data
  structure, providing locking as part of the API, specifying GFP flags
  at allocation time, eliminating preloading, less re-walking the tree,
  more efficient iterations and not exposing RCU-protected pointers to
  its users.

  This patch set:

   1. Introduces the XArray implementation

   2. Converts the pagecache to use it

   3. Converts memremap to use it

  The page cache is the most complex and important user of the radix
  tree, so converting it was most important. Converting the memremap
  code removes the only other user of the multiorder code, which allows
  us to remove the radix tree code that supported it.

  I have 40+ followup patches to convert many other users of the radix
  tree over to the XArray, but I'd like to get this part in first. The
  other conversions haven't been in linux-next and aren't suitable for
  applying yet, but you can see them in the xarray-conv branch if
  you're interested"

* 'xarray' of git://git.infradead.org/users/willy/linux-dax: (90 commits)
  radix tree: Remove multiorder support
  radix tree test: Convert multiorder tests to XArray
  radix tree tests: Convert item_delete_rcu to XArray
  radix tree tests: Convert item_kill_tree to XArray
  radix tree tests: Move item_insert_order
  radix tree test suite: Remove multiorder benchmarking
  radix tree test suite: Remove __item_insert
  memremap: Convert to XArray
  xarray: Add range store functionality
  xarray: Move multiorder_check to in-kernel tests
  xarray: Move multiorder_shrink to kernel tests
  xarray: Move multiorder account test in-kernel
  radix tree test suite: Convert iteration test to XArray
  radix tree test suite: Convert tag_tagged_items to XArray
  radix tree: Remove radix_tree_clear_tags
  radix tree: Remove radix_tree_maybe_preload_order
  radix tree: Remove split/join code
  radix tree: Remove radix_tree_update_node_t
  page cache: Finish XArray conversion
  dax: Convert page fault handlers to XArray
  ...
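Since the pull message describes the new API only in prose, here is a minimal sketch of how the documented entry points read. This is not code from the commit; the example_* names are hypothetical, while xa_store()/xa_load()/xa_erase() and DEFINE_XARRAY() are the real interfaces. Locking and RCU are handled inside the API calls, and GFP flags are supplied at store time rather than via a preload step:

	#include <linux/xarray.h>

	/* Hypothetical array; xa_lock and RCU are taken inside the API. */
	static DEFINE_XARRAY(example_array);

	static int example_store(unsigned long index, void *item)
	{
		/* GFP flags are passed at store time; no preloading needed. */
		return xa_err(xa_store(&example_array, index, item, GFP_KERNEL));
	}

	static void *example_load(unsigned long index)
	{
		/* Takes the RCU read lock internally; callers never see
		 * RCU-protected internal pointers. */
		return xa_load(&example_array, index);
	}

	static void *example_erase(unsigned long index)
	{
		return xa_erase(&example_array, index);	/* returns removed entry */
	}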
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -399,8 +399,11 @@ config INTERVAL_TREE
 	  for more information.
 
-config RADIX_TREE_MULTIORDER
+config XARRAY_MULTI
 	bool
+	help
+	  Support entries which occupy multiple consecutive indices in the
+	  XArray.
 
 config ASSOCIATIVE_ARRAY
 	bool
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1813,6 +1813,9 @@ config TEST_BITFIELD
 config TEST_UUID
 	tristate "Test functions located in the uuid module at runtime"
 
+config TEST_XARRAY
+	tristate "Test the XArray code at runtime"
+
 config TEST_OVERFLOW
 	tristate "Test check_*_overflow() functions at runtime"
 
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,7 +18,7 @@ KCOV_INSTRUMENT_debugobjects.o := n
 KCOV_INSTRUMENT_dynamic_debug.o := n
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
-	 rbtree.o radix-tree.o timerqueue.o\
+	 rbtree.o radix-tree.o timerqueue.o xarray.o \
 	 idr.o int_sqrt.o extable.o \
 	 sha1.o chacha20.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
@@ -68,6 +68,7 @@ obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
 obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
 obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
 obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
lib/idr.c (411 lines changed)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,8 +6,6 @@
 #include <linux/spinlock.h>
 #include <linux/xarray.h>
 
-DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-
 /**
  * idr_alloc_u32() - Allocate an ID.
  * @idr: IDR handle.
@@ -39,10 +37,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
 	unsigned int base = idr->idr_base;
 	unsigned int id = *nextid;
 
-	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
-		return -EINVAL;
-	if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
-		idr->idr_rt.gfp_mask |= IDR_RT_MARKER;
+	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
+		idr->idr_rt.xa_flags |= IDR_RT_MARKER;
 
 	id = (id < base) ? 0 : id - base;
 	radix_tree_iter_init(&iter, id);
@@ -295,15 +291,13 @@ void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
 	void __rcu **slot = NULL;
 	void *entry;
 
-	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
-		return ERR_PTR(-EINVAL);
 	id -= idr->idr_base;
 
 	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
 	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
 		return ERR_PTR(-ENOENT);
 
-	__radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL);
+	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);
 
 	return entry;
 }
@@ -324,6 +318,9 @@ EXPORT_SYMBOL(idr_replace);
  * free the individual IDs in it. You can use ida_is_empty() to find
  * out whether the IDA has any IDs currently allocated.
  *
+ * The IDA handles its own locking. It is safe to call any of the IDA
+ * functions without synchronisation in your code.
+ *
  * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
  * limitation, it should be quite straightforward to raise the maximum.
  */
@@ -331,190 +328,38 @@ EXPORT_SYMBOL(idr_replace);
 /*
  * Developer's notes:
  *
- * The IDA uses the functionality provided by the IDR & radix tree to store
- * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
- * free, unlike the IDR where it means at least one entry is free.
+ * The IDA uses the functionality provided by the XArray to store bitmaps in
+ * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
+ * have been set.
  *
- * I considered telling the radix tree that each slot is an order-10 node
- * and storing the bit numbers in the radix tree, but the radix tree can't
- * allow a single multiorder entry at index 0, which would significantly
- * increase memory consumption for the IDA. So instead we divide the index
- * by the number of bits in the leaf bitmap before doing a radix tree lookup.
+ * I considered telling the XArray that each slot is an order-10 node
+ * and indexing by bit number, but the XArray can't allow a single multi-index
+ * entry in the head, which would significantly increase memory consumption
+ * for the IDA. So instead we divide the index by the number of bits in the
+ * leaf bitmap before doing a radix tree lookup.
  *
  * As an optimisation, if there are only a few low bits set in any given
- * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
- * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
- * directly in the entry. By being really tricksy, we could store
- * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
- * for 0-3 allocated IDs.
+ * leaf, instead of allocating a 128-byte bitmap, we store the bits
+ * as a value entry. Value entries never have the XA_FREE_MARK cleared
+ * because we can always convert them into a bitmap entry.
  *
- * We allow the radix tree 'exceptional' count to get out of date. Nothing
- * in the IDA nor the radix tree code checks it. If it becomes important
- * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
- * calls to radix_tree_iter_replace() which will correct the exceptional
- * count.
+ * It would be possible to optimise further; once we've run out of a
+ * single 128-byte bitmap, we currently switch to a 576-byte node, put
+ * the 128-byte bitmap in the first entry and then start allocating extra
+ * 128-byte entries. We could instead use the 512 bytes of the node's
+ * data as a bitmap before moving to that scheme. I do not believe this
+ * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
+ * users of the IDA and almost none of them use more than 1024 entries.
+ * Those that do use more than the 8192 IDs that the 512 bytes would
+ * provide.
  *
- * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
+ * The IDA always uses a lock to alloc/free. If we add a 'test_bit'
  * equivalent, it will still need locking. Going to RCU lookup would require
  * using RCU to free bitmaps, and that's not trivial without embedding an
  * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
  * bitmap, which is excessive.
  */
 
-#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
-
-static int ida_get_new_above(struct ida *ida, int start)
-{
-	struct radix_tree_root *root = &ida->ida_rt;
-	void __rcu **slot;
-	struct radix_tree_iter iter;
-	struct ida_bitmap *bitmap;
-	unsigned long index;
-	unsigned bit, ebit;
-	int new;
-
-	index = start / IDA_BITMAP_BITS;
-	bit = start % IDA_BITMAP_BITS;
-	ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
-
-	slot = radix_tree_iter_init(&iter, index);
-	for (;;) {
-		if (slot)
-			slot = radix_tree_next_slot(slot, &iter,
-						RADIX_TREE_ITER_TAGGED);
-		if (!slot) {
-			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
-			if (IS_ERR(slot)) {
-				if (slot == ERR_PTR(-ENOMEM))
-					return -EAGAIN;
-				return PTR_ERR(slot);
-			}
-		}
-		if (iter.index > index) {
-			bit = 0;
-			ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
-		}
-		new = iter.index * IDA_BITMAP_BITS;
-		bitmap = rcu_dereference_raw(*slot);
-		if (radix_tree_exception(bitmap)) {
-			unsigned long tmp = (unsigned long)bitmap;
-			ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
-			if (ebit < BITS_PER_LONG) {
-				tmp |= 1UL << ebit;
-				rcu_assign_pointer(*slot, (void *)tmp);
-				return new + ebit -
-					RADIX_TREE_EXCEPTIONAL_SHIFT;
-			}
-			bitmap = this_cpu_xchg(ida_bitmap, NULL);
-			if (!bitmap)
-				return -EAGAIN;
-			bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
-			rcu_assign_pointer(*slot, bitmap);
-		}
-
-		if (bitmap) {
-			bit = find_next_zero_bit(bitmap->bitmap,
-						IDA_BITMAP_BITS, bit);
-			new += bit;
-			if (new < 0)
-				return -ENOSPC;
-			if (bit == IDA_BITMAP_BITS)
-				continue;
-
-			__set_bit(bit, bitmap->bitmap);
-			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
-				radix_tree_iter_tag_clear(root, &iter,
-							IDR_FREE);
-		} else {
-			new += bit;
-			if (new < 0)
-				return -ENOSPC;
-			if (ebit < BITS_PER_LONG) {
-				bitmap = (void *)((1UL << ebit) |
-						RADIX_TREE_EXCEPTIONAL_ENTRY);
-				radix_tree_iter_replace(root, &iter, slot,
-						bitmap);
-				return new;
-			}
-			bitmap = this_cpu_xchg(ida_bitmap, NULL);
-			if (!bitmap)
-				return -EAGAIN;
-			__set_bit(bit, bitmap->bitmap);
-			radix_tree_iter_replace(root, &iter, slot, bitmap);
-		}
-
-		return new;
-	}
-}
-
-static void ida_remove(struct ida *ida, int id)
-{
-	unsigned long index = id / IDA_BITMAP_BITS;
-	unsigned offset = id % IDA_BITMAP_BITS;
-	struct ida_bitmap *bitmap;
-	unsigned long *btmp;
-	struct radix_tree_iter iter;
-	void __rcu **slot;
-
-	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
-	if (!slot)
-		goto err;
-
-	bitmap = rcu_dereference_raw(*slot);
-	if (radix_tree_exception(bitmap)) {
-		btmp = (unsigned long *)slot;
-		offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
-		if (offset >= BITS_PER_LONG)
-			goto err;
-	} else {
-		btmp = bitmap->bitmap;
-	}
-	if (!test_bit(offset, btmp))
-		goto err;
-
-	__clear_bit(offset, btmp);
-	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
-	if (radix_tree_exception(bitmap)) {
-		if (rcu_dereference_raw(*slot) ==
-					(void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
-			radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
-	} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
-		kfree(bitmap);
-		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
-	}
-	return;
- err:
-	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
-}
-
-/**
- * ida_destroy() - Free all IDs.
- * @ida: IDA handle.
- *
- * Calling this function frees all IDs and releases all resources used
- * by an IDA. When this call returns, the IDA is empty and can be reused
- * or freed. If the IDA is already empty, there is no need to call this
- * function.
- *
- * Context: Any context.
- */
-void ida_destroy(struct ida *ida)
-{
-	unsigned long flags;
-	struct radix_tree_iter iter;
-	void __rcu **slot;
-
-	xa_lock_irqsave(&ida->ida_rt, flags);
-	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
-		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
-		if (!radix_tree_exception(bitmap))
-			kfree(bitmap);
-		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
-	}
-	xa_unlock_irqrestore(&ida->ida_rt, flags);
-}
-EXPORT_SYMBOL(ida_destroy);
-
 /**
  * ida_alloc_range() - Allocate an unused ID.
  * @ida: IDA handle.
@@ -532,8 +377,10 @@ EXPORT_SYMBOL(ida_destroy);
 int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
 			gfp_t gfp)
 {
-	int id = 0;
+	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
+	unsigned bit = min % IDA_BITMAP_BITS;
 	unsigned long flags;
+	struct ida_bitmap *bitmap, *alloc = NULL;
 
 	if ((int)min < 0)
 		return -ENOSPC;
@@ -541,22 +388,87 @@ int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
 	if ((int)max < 0)
 		max = INT_MAX;
 
-again:
-	xa_lock_irqsave(&ida->ida_rt, flags);
-	id = ida_get_new_above(ida, min);
-	if (id > (int)max) {
-		ida_remove(ida, id);
-		id = -ENOSPC;
-	}
-	xa_unlock_irqrestore(&ida->ida_rt, flags);
+retry:
+	xas_lock_irqsave(&xas, flags);
+next:
+	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
+	if (xas.xa_index > min / IDA_BITMAP_BITS)
+		bit = 0;
+	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+		goto nospc;
 
-	if (unlikely(id == -EAGAIN)) {
-		if (!ida_pre_get(ida, gfp))
-			return -ENOMEM;
-		goto again;
+	if (xa_is_value(bitmap)) {
+		unsigned long tmp = xa_to_value(bitmap);
+
+		if (bit < BITS_PER_XA_VALUE) {
+			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
+			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+				goto nospc;
+			if (bit < BITS_PER_XA_VALUE) {
+				tmp |= 1UL << bit;
+				xas_store(&xas, xa_mk_value(tmp));
+				goto out;
+			}
+		}
+		bitmap = alloc;
+		if (!bitmap)
+			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+		if (!bitmap)
+			goto alloc;
+		bitmap->bitmap[0] = tmp;
+		xas_store(&xas, bitmap);
+		if (xas_error(&xas)) {
+			bitmap->bitmap[0] = 0;
+			goto out;
+		}
 	}
 
-	return id;
+	if (bitmap) {
+		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
+		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+			goto nospc;
+		if (bit == IDA_BITMAP_BITS)
+			goto next;
+
+		__set_bit(bit, bitmap->bitmap);
+		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
+			xas_clear_mark(&xas, XA_FREE_MARK);
+	} else {
+		if (bit < BITS_PER_XA_VALUE) {
+			bitmap = xa_mk_value(1UL << bit);
+		} else {
+			bitmap = alloc;
+			if (!bitmap)
+				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+			if (!bitmap)
+				goto alloc;
+			__set_bit(bit, bitmap->bitmap);
+		}
+		xas_store(&xas, bitmap);
+	}
+out:
+	xas_unlock_irqrestore(&xas, flags);
+	if (xas_nomem(&xas, gfp)) {
+		xas.xa_index = min / IDA_BITMAP_BITS;
+		bit = min % IDA_BITMAP_BITS;
+		goto retry;
+	}
+	if (bitmap != alloc)
+		kfree(alloc);
+	if (xas_error(&xas))
+		return xas_error(&xas);
+	return xas.xa_index * IDA_BITMAP_BITS + bit;
+alloc:
+	xas_unlock_irqrestore(&xas, flags);
+	alloc = kzalloc(sizeof(*bitmap), gfp);
+	if (!alloc)
+		return -ENOMEM;
+	xas_set(&xas, min / IDA_BITMAP_BITS);
+	bit = min % IDA_BITMAP_BITS;
+	goto retry;
+nospc:
+	xas_unlock_irqrestore(&xas, flags);
+	return -ENOSPC;
 }
 EXPORT_SYMBOL(ida_alloc_range);
@@ -569,11 +481,112 @@ EXPORT_SYMBOL(ida_alloc_range);
  */
 void ida_free(struct ida *ida, unsigned int id)
 {
+	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
+	unsigned bit = id % IDA_BITMAP_BITS;
+	struct ida_bitmap *bitmap;
 	unsigned long flags;
 
 	BUG_ON((int)id < 0);
-	xa_lock_irqsave(&ida->ida_rt, flags);
-	ida_remove(ida, id);
-	xa_unlock_irqrestore(&ida->ida_rt, flags);
+
+	xas_lock_irqsave(&xas, flags);
+	bitmap = xas_load(&xas);
+
+	if (xa_is_value(bitmap)) {
+		unsigned long v = xa_to_value(bitmap);
+		if (bit >= BITS_PER_XA_VALUE)
+			goto err;
+		if (!(v & (1UL << bit)))
+			goto err;
+		v &= ~(1UL << bit);
+		if (!v)
+			goto delete;
+		xas_store(&xas, xa_mk_value(v));
+	} else {
+		if (!test_bit(bit, bitmap->bitmap))
+			goto err;
+		__clear_bit(bit, bitmap->bitmap);
+		xas_set_mark(&xas, XA_FREE_MARK);
+		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
+			kfree(bitmap);
+delete:
+			xas_store(&xas, NULL);
+		}
+	}
+	xas_unlock_irqrestore(&xas, flags);
+	return;
+ err:
+	xas_unlock_irqrestore(&xas, flags);
+	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
 }
 EXPORT_SYMBOL(ida_free);
+
+/**
+ * ida_destroy() - Free all IDs.
+ * @ida: IDA handle.
+ *
+ * Calling this function frees all IDs and releases all resources used
+ * by an IDA. When this call returns, the IDA is empty and can be reused
+ * or freed. If the IDA is already empty, there is no need to call this
+ * function.
+ *
+ * Context: Any context.
+ */
+void ida_destroy(struct ida *ida)
+{
+	XA_STATE(xas, &ida->xa, 0);
+	struct ida_bitmap *bitmap;
+	unsigned long flags;
+
+	xas_lock_irqsave(&xas, flags);
+	xas_for_each(&xas, bitmap, ULONG_MAX) {
+		if (!xa_is_value(bitmap))
+			kfree(bitmap);
+		xas_store(&xas, NULL);
+	}
+	xas_unlock_irqrestore(&xas, flags);
+}
+EXPORT_SYMBOL(ida_destroy);
+
+#ifndef __KERNEL__
+extern void xa_dump_index(unsigned long index, unsigned int shift);
+#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)
+
+static void ida_dump_entry(void *entry, unsigned long index)
+{
+	unsigned long i;
+
+	if (!entry)
+		return;
+
+	if (xa_is_node(entry)) {
+		struct xa_node *node = xa_to_node(entry);
+		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
+			XA_CHUNK_SHIFT;
+
+		xa_dump_index(index * IDA_BITMAP_BITS, shift);
+		xa_dump_node(node);
+		for (i = 0; i < XA_CHUNK_SIZE; i++)
+			ida_dump_entry(node->slots[i],
+					index | (i << node->shift));
+	} else if (xa_is_value(entry)) {
+		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
+		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
+	} else {
+		struct ida_bitmap *bitmap = entry;
+
+		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
+		pr_cont("bitmap: %p data", bitmap);
+		for (i = 0; i < IDA_BITMAP_LONGS; i++)
+			pr_cont(" %lx", bitmap->bitmap[i]);
+		pr_cont("\n");
+	}
+}
+
+static void ida_dump(struct ida *ida)
+{
+	struct xarray *xa = &ida->xa;
+	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
+		xa->xa_flags >> ROOT_TAG_SHIFT);
+	ida_dump_entry(xa->xa_head, 0);
+}
+#endif
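The developer's notes in the hunk above lean on value entries: small bitmaps encoded directly in the slot word rather than in a separately allocated struct ida_bitmap. A hedged illustration of that encoding follows, using the real xa_mk_value()/xa_to_value()/xa_is_value() helpers; the pack_/unpack_ function names are hypothetical and not part of this commit:

	#include <linux/xarray.h>

	/*
	 * A value entry stores an integer in the slot itself: the value is
	 * shifted left by one with the bottom bit set, so it can never be
	 * mistaken for a kernel pointer. This leaves BITS_PER_XA_VALUE
	 * (BITS_PER_LONG - 1) usable bits per slot.
	 */
	static void *pack_small_bitmap(unsigned long bits)
	{
		return xa_mk_value(bits);	/* tag the integer as a value entry */
	}

	static unsigned long unpack_small_bitmap(void *entry)
	{
		if (xa_is_value(entry))		/* tagged integer? */
			return xa_to_value(entry);
		return 0;			/* a real struct ida_bitmap pointer */
	}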
lib/radix-tree.c (834 lines changed; diff suppressed because it is too large)
lib/test_xarray.c (new file, 1238 lines; diff suppressed because it is too large)
lib/xarray.c (new file, 2036 lines; diff suppressed because it is too large)
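For completeness, a short usage sketch of the converted IDA from a caller's perspective. ida_alloc() and ida_free() are the real entry points; the example_* wrappers are hypothetical. Per the kernel-doc added in the lib/idr.c hunks above, no external locking is needed:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);	/* the IDA locks internally */

	static int example_get_id(void)
	{
		/* Lowest free ID in [0, INT_MAX], or a negative errno. */
		return ida_alloc(&example_ida, GFP_KERNEL);
	}

	static void example_put_id(int id)
	{
		ida_free(&example_ida, id);
	}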