XArray: Add support for 1s-based allocation

A lot of places want to allocate IDs starting at 1 instead of 0.
While the xa_alloc() API supports this, it is not very efficient when
many IDs are allocated, because every allocation has to walk down to
the bottom of the tree to see whether ID 1 is available, then all the
way over to the next unallocated ID.  This patch instead marks ID 0 as
occupied, which wastes one slot in the XArray but keeps xa_empty()
working.
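
To make the effect concrete, here is a minimal caller sketch. The names
conn_ids, conn_id_alloc and idp are made up for illustration; it assumes
the DEFINE_XARRAY_ALLOC1() macro this commit introduces (visible in the
test changes below) and the xa_alloc() calling convention used there,
where *id on entry is the minimum ID to search from.

	#include <linux/xarray.h>

	/* An allocating XArray whose ID 0 is born busy. */
	static DEFINE_XARRAY_ALLOC1(conn_ids);

	static int conn_id_alloc(void *conn, u32 *idp)
	{
		u32 id = 0;	/* search minimum; slot 0 is busy, so the result is >= 1 */
		int err;

		/* conn must be non-NULL: NULL looks like a free slot */
		err = xa_alloc(&conn_ids, &id, UINT_MAX, conn, GFP_KERNEL);
		if (err)
			return err;
		*idp = id;	/* never 0 */
		return 0;
	}

Previously a caller had to pass a minimum of 1 on every call, which is
what forced the tree walk described above; with the zero slot
pre-occupied, a plain allocation starting from 0 can never return 0.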

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Matthew Wilcox
2018-10-26 14:43:22 -04:00
parent fd9dc93e36
commit 3ccaf57a6a
4 changed files with 86 additions and 37 deletions

lib/test_xarray.c

@@ -589,64 +589,86 @@ static noinline void check_multi_store(struct xarray *xa)
 #endif
 }
 
-static DEFINE_XARRAY_ALLOC(xa0);
-
-static noinline void check_xa_alloc(void)
+static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
 {
 	int i;
 	u32 id;
 
-	/* An empty array should assign 0 to the first alloc */
-	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+	XA_BUG_ON(xa, !xa_empty(xa));
+	/* An empty array should assign %base to the first alloc */
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
 	/* Erasing it should make the array empty again */
-	xa_erase_index(&xa0, 0);
-	XA_BUG_ON(&xa0, !xa_empty(&xa0));
+	xa_erase_index(xa, base);
+	XA_BUG_ON(xa, !xa_empty(xa));
 
-	/* And it should assign 0 again */
-	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+	/* And it should assign %base again */
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
-	/* The next assigned ID should be 1 */
-	xa_alloc_index(&xa0, 1, GFP_KERNEL);
-	xa_erase_index(&xa0, 1);
+	/* Allocating and then erasing a lot should not lose base */
+	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
+		xa_alloc_index(xa, i, GFP_KERNEL);
+	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
+		xa_erase_index(xa, i);
+	xa_alloc_index(xa, base, GFP_KERNEL);
+
+	/* Destroying the array should do the same as erasing */
+	xa_destroy(xa);
+
+	/* And it should assign %base again */
+	xa_alloc_index(xa, base, GFP_KERNEL);
+
+	/* The next assigned ID should be base+1 */
+	xa_alloc_index(xa, base + 1, GFP_KERNEL);
+	xa_erase_index(xa, base + 1);
 
 	/* Storing a value should mark it used */
-	xa_store_index(&xa0, 1, GFP_KERNEL);
-	xa_alloc_index(&xa0, 2, GFP_KERNEL);
+	xa_store_index(xa, base + 1, GFP_KERNEL);
+	xa_alloc_index(xa, base + 2, GFP_KERNEL);
 
-	/* If we then erase 0, it should be free */
-	xa_erase_index(&xa0, 0);
-	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+	/* If we then erase base, it should be free */
+	xa_erase_index(xa, base);
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
-	xa_erase_index(&xa0, 1);
-	xa_erase_index(&xa0, 2);
+	xa_erase_index(xa, base + 1);
+	xa_erase_index(xa, base + 2);
 
 	for (i = 1; i < 5000; i++) {
-		xa_alloc_index(&xa0, i, GFP_KERNEL);
+		xa_alloc_index(xa, base + i, GFP_KERNEL);
 	}
 
-	xa_destroy(&xa0);
+	xa_destroy(xa);
 
 	/* Check that we fail properly at the limit of allocation */
 	id = 0xfffffffeU;
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != 0);
-	XA_BUG_ON(&xa0, id != 0xfffffffeU);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+	XA_BUG_ON(xa, id != 0xfffffffeU);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != 0);
-	XA_BUG_ON(&xa0, id != 0xffffffffU);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+	XA_BUG_ON(xa, id != 0xffffffffU);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != -ENOSPC);
-	XA_BUG_ON(&xa0, id != 0xffffffffU);
-	xa_destroy(&xa0);
+	XA_BUG_ON(xa, id != 0xffffffffU);
+	xa_destroy(xa);
 
 	id = 10;
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id),
 				GFP_KERNEL) != -ENOSPC);
-	XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id),
 				GFP_KERNEL) != -ENOSPC);
-	xa_erase_index(&xa0, 3);
-	XA_BUG_ON(&xa0, !xa_empty(&xa0));
+	xa_erase_index(xa, 3);
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static DEFINE_XARRAY_ALLOC(xa0);
+static DEFINE_XARRAY_ALLOC1(xa1);
+
+static noinline void check_xa_alloc(void)
+{
+	check_xa_alloc_1(&xa0, 0);
+	check_xa_alloc_1(&xa1, 1);
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,

lib/xarray.c

@@ -57,6 +57,11 @@ static inline bool xa_track_free(const struct xarray *xa)
 	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
 }
 
+static inline bool xa_zero_busy(const struct xarray *xa)
+{
+	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
+}
+
 static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
 {
 	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
@@ -432,6 +437,8 @@ static void xas_shrink(struct xa_state *xas)
 			break;
 		if (!xa_is_node(entry) && node->shift)
 			break;
+		if (xa_is_zero(entry) && xa_zero_busy(xa))
+			entry = NULL;
 		xas->xa_node = XAS_BOUNDS;
 
 		RCU_INIT_POINTER(xa->xa_head, entry);
@@ -628,6 +635,8 @@ static void *xas_create(struct xa_state *xas, bool allow_root)
 	if (xas_top(node)) {
 		entry = xa_head_locked(xa);
 		xas->xa_node = NULL;
+		if (!entry && xa_zero_busy(xa))
+			entry = XA_ZERO_ENTRY;
 		shift = xas_expand(xas, entry);
 		if (shift < 0)
 			return NULL;
@@ -1942,6 +1951,8 @@ void xa_destroy(struct xarray *xa)
 	entry = xa_head_locked(xa);
 	RCU_INIT_POINTER(xa->xa_head, NULL);
 	xas_init_marks(&xas);
+	if (xa_zero_busy(xa))
+		xa_mark_clear(xa, XA_FREE_MARK);
 	/* lockdep checks we're still holding the lock in xas_free_nodes() */
 	if (xa_is_node(entry))
 		xas_free_nodes(&xas, xa_to_node(entry));
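
Taken together, the three hooks above give a 1-based array the following
observable behaviour. This is a hedged demonstration in the style of the
test suite; demo_xa and demo_zero_busy are made-up names.

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC1(demo_xa);

	static void demo_zero_busy(void)
	{
		u32 id = 0;

		/* xas_create() plants XA_ZERO_ENTRY at index 0 ... */
		WARN_ON(xa_alloc(&demo_xa, &id, UINT_MAX, xa_mk_value(42),
					GFP_KERNEL));
		WARN_ON(id != 1);		/* ... so ID 0 is never handed out */
		WARN_ON(xa_load(&demo_xa, 0));	/* yet index 0 reads back as NULL */

		/* xas_shrink() turns a lone zero entry back into NULL ... */
		xa_erase(&demo_xa, id);
		WARN_ON(!xa_empty(&demo_xa));	/* ... so xa_empty() still works */
	}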