Merge branch 'xarray' of git://git.infradead.org/users/willy/linux-dax

Pull XArray conversion from Matthew Wilcox:
 "The XArray provides an improved interface to the radix tree data
  structure, providing locking as part of the API, specifying GFP flags
  at allocation time, eliminating preloading, re-walking the tree less,
  iterating more efficiently, and not exposing RCU-protected pointers to
  its users.

  This patch set

   1. Introduces the XArray implementation

   2. Converts the pagecache to use it

   3. Converts memremap to use it

  The page cache is the most complex and important user of the radix
  tree, so converting it was most important. Converting the memremap
  code removes the only other user of the multiorder code, which allows
  us to remove the radix tree code that supported it.

  I have 40+ follow-up patches to convert many other users of the radix
  tree over to the XArray, but I'd like to get this part in first. The
  other conversions haven't been in linux-next and aren't suitable for
  applying yet, but you can see them in the xarray-conv branch if you're
  interested"

* 'xarray' of git://git.infradead.org/users/willy/linux-dax: (90 commits)
  radix tree: Remove multiorder support
  radix tree test: Convert multiorder tests to XArray
  radix tree tests: Convert item_delete_rcu to XArray
  radix tree tests: Convert item_kill_tree to XArray
  radix tree tests: Move item_insert_order
  radix tree test suite: Remove multiorder benchmarking
  radix tree test suite: Remove __item_insert
  memremap: Convert to XArray
  xarray: Add range store functionality
  xarray: Move multiorder_check to in-kernel tests
  xarray: Move multiorder_shrink to kernel tests
  xarray: Move multiorder account test in-kernel
  radix tree test suite: Convert iteration test to XArray
  radix tree test suite: Convert tag_tagged_items to XArray
  radix tree: Remove radix_tree_clear_tags
  radix tree: Remove radix_tree_maybe_preload_order
  radix tree: Remove split/join code
  radix tree: Remove radix_tree_update_node_t
  page cache: Finish XArray conversion
  dax: Convert page fault handlers to XArray
  ...
Linus Torvalds, 2018-10-28 11:35:40 -07:00
93 changed files with 7056 additions and 3825 deletions
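
A minimal sketch of the core API being described, for orientation (the array name, index and error handling are illustrative, not taken from the series): allocation flags are passed at the point where memory may actually be allocated, the xa_lock is taken internally by the normal entry points, and lookups never hand internal RCU-protected slots back to the caller.

#include <linux/xarray.h>

static DEFINE_XARRAY(my_array);

static int xarray_demo(void *item)
{
	void *old;

	/* GFP flags are supplied at store time; no preload step needed. */
	old = xa_store(&my_array, 5, item, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);

	/* RCU-safe lookup; returns the entry itself, not a slot pointer. */
	if (xa_load(&my_array, 5) != item)
		return -EINVAL;

	/* xa_erase() removes the entry and returns what was stored. */
	return xa_erase(&my_array, 5) == item ? 0 : -EINVAL;
}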


@@ -4,3 +4,4 @@ idr-test
main
multiorder
radix-tree.c
xarray


@@ -4,8 +4,8 @@ CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
-fsanitize=undefined
LDFLAGS += -fsanitize=address -fsanitize=undefined
LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder
CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
TARGETS = main idr-test multiorder xarray
CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
@@ -25,6 +25,8 @@ main: $(OFILES)
idr-test.o: ../../../lib/test_ida.c
idr-test: idr-test.o $(CORE_OFILES)
xarray: $(CORE_OFILES)
multiorder: multiorder.o $(CORE_OFILES)
clean:
@@ -35,6 +37,7 @@ vpath %.c ../../lib
$(OFILES): Makefile *.h */*.h generated/map-shift.h \
../../include/linux/*.h \
../../include/asm/*.h \
../../../include/linux/xarray.h \
../../../include/linux/radix-tree.h \
../../../include/linux/idr.h
@@ -44,8 +47,10 @@ radix-tree.c: ../../../lib/radix-tree.c
idr.c: ../../../lib/idr.c
sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
xarray.o: ../../../lib/xarray.c ../../../lib/test_xarray.c
generated/map-shift.h:
@if ! grep -qws $(SHIFT) generated/map-shift.h; then \
echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
generated/map-shift.h; \
fi
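
Assuming the suite's usual workflow, the new standalone runner would presumably be built and run from tools/testing/radix-tree with "make xarray" followed by "./xarray". Note that the map-shift.h rule above only rewrites the generated header when the requested $(SHIFT) changes, and $(OFILES) depend on that header, so switching chunk sizes rebuilds the affected objects.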


@@ -17,9 +17,6 @@
#include <time.h>
#include "test.h"
#define for_each_index(i, base, order) \
for (i = base; i < base + (1 << order); i++)
#define NSEC_PER_SEC 1000000000L
static long long benchmark_iter(struct radix_tree_root *root, bool tagged)
@@ -61,7 +58,7 @@ again:
}
static void benchmark_insert(struct radix_tree_root *root,
unsigned long size, unsigned long step, int order)
unsigned long size, unsigned long step)
{
struct timespec start, finish;
unsigned long index;
@@ -70,19 +67,19 @@ static void benchmark_insert(struct radix_tree_root *root,
clock_gettime(CLOCK_MONOTONIC, &start);
for (index = 0 ; index < size ; index += step)
item_insert_order(root, index, order);
item_insert(root, index);
clock_gettime(CLOCK_MONOTONIC, &finish);
nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
(finish.tv_nsec - start.tv_nsec);
printv(2, "Size: %8ld, step: %8ld, order: %d, insertion: %15lld ns\n",
size, step, order, nsec);
printv(2, "Size: %8ld, step: %8ld, insertion: %15lld ns\n",
size, step, nsec);
}
static void benchmark_tagging(struct radix_tree_root *root,
unsigned long size, unsigned long step, int order)
unsigned long size, unsigned long step)
{
struct timespec start, finish;
unsigned long index;
@@ -98,138 +95,53 @@ static void benchmark_tagging(struct radix_tree_root *root,
nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
(finish.tv_nsec - start.tv_nsec);
printv(2, "Size: %8ld, step: %8ld, order: %d, tagging: %17lld ns\n",
size, step, order, nsec);
printv(2, "Size: %8ld, step: %8ld, tagging: %17lld ns\n",
size, step, nsec);
}
static void benchmark_delete(struct radix_tree_root *root,
unsigned long size, unsigned long step, int order)
unsigned long size, unsigned long step)
{
struct timespec start, finish;
unsigned long index, i;
unsigned long index;
long long nsec;
clock_gettime(CLOCK_MONOTONIC, &start);
for (index = 0 ; index < size ; index += step)
for_each_index(i, index, order)
item_delete(root, i);
item_delete(root, index);
clock_gettime(CLOCK_MONOTONIC, &finish);
nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
(finish.tv_nsec - start.tv_nsec);
printv(2, "Size: %8ld, step: %8ld, order: %d, deletion: %16lld ns\n",
size, step, order, nsec);
printv(2, "Size: %8ld, step: %8ld, deletion: %16lld ns\n",
size, step, nsec);
}
static void benchmark_size(unsigned long size, unsigned long step, int order)
static void benchmark_size(unsigned long size, unsigned long step)
{
RADIX_TREE(tree, GFP_KERNEL);
long long normal, tagged;
benchmark_insert(&tree, size, step, order);
benchmark_tagging(&tree, size, step, order);
benchmark_insert(&tree, size, step);
benchmark_tagging(&tree, size, step);
tagged = benchmark_iter(&tree, true);
normal = benchmark_iter(&tree, false);
printv(2, "Size: %8ld, step: %8ld, order: %d, tagged iteration: %8lld ns\n",
size, step, order, tagged);
printv(2, "Size: %8ld, step: %8ld, order: %d, normal iteration: %8lld ns\n",
size, step, order, normal);
printv(2, "Size: %8ld, step: %8ld, tagged iteration: %8lld ns\n",
size, step, tagged);
printv(2, "Size: %8ld, step: %8ld, normal iteration: %8lld ns\n",
size, step, normal);
benchmark_delete(&tree, size, step, order);
benchmark_delete(&tree, size, step);
item_kill_tree(&tree);
rcu_barrier();
}
static long long __benchmark_split(unsigned long index,
int old_order, int new_order)
{
struct timespec start, finish;
long long nsec;
RADIX_TREE(tree, GFP_ATOMIC);
item_insert_order(&tree, index, old_order);
clock_gettime(CLOCK_MONOTONIC, &start);
radix_tree_split(&tree, index, new_order);
clock_gettime(CLOCK_MONOTONIC, &finish);
nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
(finish.tv_nsec - start.tv_nsec);
item_kill_tree(&tree);
return nsec;
}
static void benchmark_split(unsigned long size, unsigned long step)
{
int i, j, idx;
long long nsec = 0;
for (idx = 0; idx < size; idx += step) {
for (i = 3; i < 11; i++) {
for (j = 0; j < i; j++) {
nsec += __benchmark_split(idx, i, j);
}
}
}
printv(2, "Size %8ld, step %8ld, split time %10lld ns\n",
size, step, nsec);
}
static long long __benchmark_join(unsigned long index,
unsigned order1, unsigned order2)
{
unsigned long loc;
struct timespec start, finish;
long long nsec;
void *item, *item2 = item_create(index + 1, order1);
RADIX_TREE(tree, GFP_KERNEL);
item_insert_order(&tree, index, order2);
item = radix_tree_lookup(&tree, index);
clock_gettime(CLOCK_MONOTONIC, &start);
radix_tree_join(&tree, index + 1, order1, item2);
clock_gettime(CLOCK_MONOTONIC, &finish);
nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
(finish.tv_nsec - start.tv_nsec);
loc = find_item(&tree, item);
if (loc == -1)
free(item);
item_kill_tree(&tree);
return nsec;
}
static void benchmark_join(unsigned long step)
{
int i, j, idx;
long long nsec = 0;
for (idx = 0; idx < 1 << 10; idx += step) {
for (i = 1; i < 15; i++) {
for (j = 0; j < i; j++) {
nsec += __benchmark_join(idx, i, j);
}
}
}
printv(2, "Size %8d, step %8ld, join time %10lld ns\n",
1 << 10, step, nsec);
}
void benchmark(void)
{
unsigned long size[] = {1 << 10, 1 << 20, 0};
@@ -242,16 +154,5 @@ void benchmark(void)
for (c = 0; size[c]; c++)
for (s = 0; step[s]; s++)
benchmark_size(size[c], step[s], 0);
for (c = 0; size[c]; c++)
for (s = 0; step[s]; s++)
benchmark_size(size[c], step[s] << 9, 9);
for (c = 0; size[c]; c++)
for (s = 0; step[s]; s++)
benchmark_split(size[c], step[s]);
for (s = 0; step[s]; s++)
benchmark_join(step[s]);
benchmark_size(size[c], step[s]);
}
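
With the split and join benchmarks gone along with the order parameter, every remaining benchmark times a flat loop over the structure. A hypothetical XArray analogue of the same clock_gettime() pattern (benchmark_xa_insert and its use of value entries are invented for illustration, not part of this series):

#include <time.h>
#include <linux/xarray.h>

#define NSEC_PER_SEC 1000000000L

static DEFINE_XARRAY(bench_array);

static long long benchmark_xa_insert(unsigned long size, unsigned long step)
{
	struct timespec start, finish;
	unsigned long index;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (index = 0; index < size; index += step)
		/* Value entries avoid allocating items just for the timing run. */
		xa_store(&bench_array, index, xa_mk_value(index), GFP_KERNEL);
	clock_gettime(CLOCK_MONOTONIC, &finish);

	return (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
	       (finish.tv_nsec - start.tv_nsec);
}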


@@ -0,0 +1,23 @@
/* lib/bitmap.c pulls in at least two other files. */
#include <linux/bitmap.h>
void bitmap_clear(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned int size = start + len;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (len - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (len) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
}
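
A quick illustration of the semantics, assuming 64-bit longs (the values are arbitrary): clearing a run that straddles a word boundary touches both words but leaves the neighbouring bits alone.

#include <assert.h>
#include <linux/bitmap.h>

static void bitmap_clear_demo(void)
{
	unsigned long map[2] = { ~0UL, ~0UL };	/* 128 bits, all set */

	bitmap_clear(map, 56, 16);		/* clears bits 56..71 */

	assert(!(map[0] & (1UL << 60)));	/* inside the range */
	assert(!(map[1] & (1UL << 7)));		/* inside, second word */
	assert(map[0] & (1UL << 55));		/* just below the range */
	assert(map[1] & (1UL << 8));		/* just above the range */
}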


@@ -1 +1 @@
#define CONFIG_RADIX_TREE_MULTIORDER 1
#define CONFIG_XARRAY_MULTI 1


@@ -19,7 +19,7 @@
#include "test.h"
#define DUMMY_PTR ((void *)0x12)
#define DUMMY_PTR ((void *)0x10)
int item_idr_free(int id, void *p, void *data)
{
@@ -227,6 +227,66 @@ void idr_u32_test(int base)
idr_u32_test1(&idr, 0xffffffff);
}
static void idr_align_test(struct idr *idr)
{
char name[] = "Motorola 68000";
int i, id;
void *entry;
for (i = 0; i < 9; i++) {
BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
idr_for_each_entry(idr, entry, id);
}
idr_destroy(idr);
for (i = 1; i < 10; i++) {
BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
idr_for_each_entry(idr, entry, id);
}
idr_destroy(idr);
for (i = 2; i < 11; i++) {
BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
idr_for_each_entry(idr, entry, id);
}
idr_destroy(idr);
for (i = 3; i < 12; i++) {
BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
idr_for_each_entry(idr, entry, id);
}
idr_destroy(idr);
for (i = 0; i < 8; i++) {
BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
idr_for_each_entry(idr, entry, id);
idr_remove(idr, 1);
idr_for_each_entry(idr, entry, id);
idr_remove(idr, 0);
BUG_ON(!idr_is_empty(idr));
}
for (i = 0; i < 8; i++) {
BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
idr_for_each_entry(idr, entry, id);
idr_replace(idr, &name[i], 0);
idr_for_each_entry(idr, entry, id);
BUG_ON(idr_find(idr, 0) != &name[i]);
idr_remove(idr, 0);
}
for (i = 0; i < 8; i++) {
BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
idr_remove(idr, 1);
idr_for_each_entry(idr, entry, id);
idr_replace(idr, &name[i + 1], 0);
idr_for_each_entry(idr, entry, id);
idr_remove(idr, 0);
}
}
void idr_checks(void)
{
unsigned long i;
@@ -307,6 +367,7 @@ void idr_checks(void)
idr_u32_test(4);
idr_u32_test(1);
idr_u32_test(0);
idr_align_test(&idr);
}
#define module_init(x)
@@ -344,16 +405,16 @@ void ida_check_conv_user(void)
DEFINE_IDA(ida);
unsigned long i;
radix_tree_cpu_dead(1);
for (i = 0; i < 1000000; i++) {
int id = ida_alloc(&ida, GFP_NOWAIT);
if (id == -ENOMEM) {
IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) !=
BITS_PER_LONG - 2);
IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
BITS_PER_XA_VALUE) &&
((i % IDA_BITMAP_BITS) != 0));
id = ida_alloc(&ida, GFP_KERNEL);
} else {
IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
BITS_PER_LONG - 2);
BITS_PER_XA_VALUE);
}
IDA_BUG_ON(&ida, id != i);
}
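
The conversion check above exercises the standard two-step allocation pattern for callers that normally hold a spinlock: try GFP_NOWAIT first and, only if the underlying bitmap needs memory, retry with a sleeping allocation. A minimal sketch of such a caller (get_id and my_ida are illustrative names):

#include <linux/idr.h>

static DEFINE_IDA(my_ida);

static int get_id(void)
{
	int id = ida_alloc(&my_ida, GFP_NOWAIT);

	if (id == -ENOMEM) {
		/* Any spinlock would have to be dropped before this point. */
		id = ida_alloc(&my_ida, GFP_KERNEL);
	}
	return id;	/* a non-negative ID, or a -errno value */
}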


@@ -1,5 +1,5 @@
/*
* iteration_check.c: test races having to do with radix tree iteration
* iteration_check.c: test races having to do with xarray iteration
* Copyright (c) 2016 Intel Corporation
* Author: Ross Zwisler <ross.zwisler@linux.intel.com>
*
@@ -12,41 +12,54 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/radix-tree.h>
#include <pthread.h>
#include "test.h"
#define NUM_THREADS 5
#define MAX_IDX 100
#define TAG 0
#define NEW_TAG 1
#define TAG XA_MARK_0
#define NEW_TAG XA_MARK_1
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t threads[NUM_THREADS];
static unsigned int seeds[3];
static RADIX_TREE(tree, GFP_KERNEL);
static DEFINE_XARRAY(array);
static bool test_complete;
static int max_order;
/* relentlessly fill the tree with tagged entries */
void my_item_insert(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
struct item *item = item_create(index, 0);
int order;
retry:
xas_lock(&xas);
for (order = max_order; order >= 0; order--) {
xas_set_order(&xas, index, order);
item->order = order;
if (xas_find_conflict(&xas))
continue;
xas_store(&xas, item);
xas_set_mark(&xas, TAG);
break;
}
xas_unlock(&xas);
if (xas_nomem(&xas, GFP_KERNEL))
goto retry;
if (order < 0)
free(item);
}
/* relentlessly fill the array with tagged entries */
static void *add_entries_fn(void *arg)
{
rcu_register_thread();
while (!test_complete) {
unsigned long pgoff;
int order;
for (pgoff = 0; pgoff < MAX_IDX; pgoff++) {
pthread_mutex_lock(&tree_lock);
for (order = max_order; order >= 0; order--) {
if (item_insert_order(&tree, pgoff, order)
== 0) {
item_tag_set(&tree, pgoff, TAG);
break;
}
}
pthread_mutex_unlock(&tree_lock);
my_item_insert(&array, pgoff);
}
}
@@ -56,33 +69,25 @@ static void *add_entries_fn(void *arg)
}
/*
* Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
* things that have been removed and randomly resetting our iteration to the
* next chunk with radix_tree_iter_resume(). Both radix_tree_iter_retry() and
* radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
* NULL 'slot' variable.
* Iterate over tagged entries, retrying when we find ourselves in a deleted
* node and randomly pausing the iteration.
*/
static void *tagged_iteration_fn(void *arg)
{
struct radix_tree_iter iter;
void **slot;
XA_STATE(xas, &array, 0);
void *entry;
rcu_register_thread();
while (!test_complete) {
xas_set(&xas, 0);
rcu_read_lock();
radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
void *entry = radix_tree_deref_slot(slot);
if (unlikely(!entry))
xas_for_each_marked(&xas, entry, ULONG_MAX, TAG) {
if (xas_retry(&xas, entry))
continue;
if (radix_tree_deref_retry(entry)) {
slot = radix_tree_iter_retry(&iter);
continue;
}
if (rand_r(&seeds[0]) % 50 == 0) {
slot = radix_tree_iter_resume(slot, &iter);
xas_pause(&xas);
rcu_read_unlock();
rcu_barrier();
rcu_read_lock();
@@ -97,33 +102,25 @@ static void *tagged_iteration_fn(void *arg)
}
/*
* Iterate over the entries, doing a radix_tree_iter_retry() as we find things
* that have been removed and randomly resetting our iteration to the next
* chunk with radix_tree_iter_resume(). Both radix_tree_iter_retry() and
* radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
* NULL 'slot' variable.
* Iterate over the entries, retrying when we find ourselves in a deleted
* node and randomly pausing the iteration.
*/
static void *untagged_iteration_fn(void *arg)
{
struct radix_tree_iter iter;
void **slot;
XA_STATE(xas, &array, 0);
void *entry;
rcu_register_thread();
while (!test_complete) {
xas_set(&xas, 0);
rcu_read_lock();
radix_tree_for_each_slot(slot, &tree, &iter, 0) {
void *entry = radix_tree_deref_slot(slot);
if (unlikely(!entry))
xas_for_each(&xas, entry, ULONG_MAX) {
if (xas_retry(&xas, entry))
continue;
if (radix_tree_deref_retry(entry)) {
slot = radix_tree_iter_retry(&iter);
continue;
}
if (rand_r(&seeds[1]) % 50 == 0) {
slot = radix_tree_iter_resume(slot, &iter);
xas_pause(&xas);
rcu_read_unlock();
rcu_barrier();
rcu_read_lock();
@@ -138,7 +135,7 @@ static void *untagged_iteration_fn(void *arg)
}
/*
* Randomly remove entries to help induce radix_tree_iter_retry() calls in the
* Randomly remove entries to help induce retries in the
* two iteration functions.
*/
static void *remove_entries_fn(void *arg)
@@ -147,12 +144,13 @@ static void *remove_entries_fn(void *arg)
while (!test_complete) {
int pgoff;
struct item *item;
pgoff = rand_r(&seeds[2]) % MAX_IDX;
pthread_mutex_lock(&tree_lock);
item_delete(&tree, pgoff);
pthread_mutex_unlock(&tree_lock);
item = xa_erase(&array, pgoff);
if (item)
item_free(item, pgoff);
}
rcu_unregister_thread();
@@ -165,8 +163,7 @@ static void *tag_entries_fn(void *arg)
rcu_register_thread();
while (!test_complete) {
tag_tagged_items(&tree, &tree_lock, 0, MAX_IDX, 10, TAG,
NEW_TAG);
tag_tagged_items(&array, 0, MAX_IDX, 10, TAG, NEW_TAG);
}
rcu_unregister_thread();
return NULL;
@@ -217,5 +214,5 @@ void iteration_test(unsigned order, unsigned test_duration)
}
}
item_kill_tree(&tree);
item_kill_tree(&array);
}
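
The two iterator threads above share one shape, which is the general XArray pattern for walking under RCU while other threads modify the array: xas_retry() restarts the lookup when the iterator lands on a retry entry left behind by node deletion, and xas_pause() parks the iterator at a safe point so the RCU read lock can be dropped and re-taken. A condensed sketch (walk_tagged is an illustrative name; the per-entry work is elided):

static void walk_tagged(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned int n = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		if (xas_retry(&xas, entry))
			continue;	/* hit a retry entry: re-read this index */
		/* ... use entry ... */
		if (++n % 50 == 0) {		/* periodically let writers in */
			xas_pause(&xas);	/* resume after this index */
			rcu_read_unlock();
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}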


@@ -1 +1,2 @@
#include <stdio.h>
#include "asm/bug.h"


@@ -0,0 +1 @@
#include "../../../../include/linux/kconfig.h"


@@ -14,7 +14,12 @@
#include "../../../include/linux/kconfig.h"
#define printk printf
#define pr_info printk
#define pr_debug printk
#define pr_cont printk
#define __acquires(x)
#define __releases(x)
#define __must_hold(x)
#endif /* _KERNEL_H */


@@ -0,0 +1,11 @@
#ifndef _LINUX_LOCKDEP_H
#define _LINUX_LOCKDEP_H
struct lock_class_key {
unsigned int a;
};
static inline void lockdep_set_class(spinlock_t *lock,
struct lock_class_key *key)
{
}
#endif /* _LINUX_LOCKDEP_H */


@@ -2,7 +2,6 @@
#ifndef _TEST_RADIX_TREE_H
#define _TEST_RADIX_TREE_H
#include "generated/map-shift.h"
#include "../../../../include/linux/radix-tree.h"
extern int kmalloc_verbose;


@@ -6,5 +6,7 @@
#define rcu_dereference_raw(p) rcu_dereference(p)
#define rcu_dereference_protected(p, cond) rcu_dereference(p)
#define rcu_dereference_check(p, cond) rcu_dereference(p)
#define RCU_INIT_POINTER(p, v) (p) = (v)
#endif


@@ -214,7 +214,7 @@ void copy_tag_check(void)
}
// printf("\ncopying tags...\n");
tagged = tag_tagged_items(&tree, NULL, start, end, ITEMS, 0, 1);
tagged = tag_tagged_items(&tree, start, end, ITEMS, XA_MARK_0, XA_MARK_1);
// printf("checking copied tags\n");
assert(tagged == count);
@@ -223,7 +223,7 @@ void copy_tag_check(void)
/* Copy tags in several rounds */
// printf("\ncopying tags...\n");
tmp = rand() % (count / 10 + 2);
tagged = tag_tagged_items(&tree, NULL, start, end, tmp, 0, 2);
tagged = tag_tagged_items(&tree, start, end, tmp, XA_MARK_0, XA_MARK_2);
assert(tagged == count);
// printf("%lu %lu %lu\n", tagged, tmp, count);
@@ -236,63 +236,6 @@ void copy_tag_check(void)
item_kill_tree(&tree);
}
static void __locate_check(struct radix_tree_root *tree, unsigned long index,
unsigned order)
{
struct item *item;
unsigned long index2;
item_insert_order(tree, index, order);
item = item_lookup(tree, index);
index2 = find_item(tree, item);
if (index != index2) {
printv(2, "index %ld order %d inserted; found %ld\n",
index, order, index2);
abort();
}
}
static void __order_0_locate_check(void)
{
RADIX_TREE(tree, GFP_KERNEL);
int i;
for (i = 0; i < 50; i++)
__locate_check(&tree, rand() % INT_MAX, 0);
item_kill_tree(&tree);
}
static void locate_check(void)
{
RADIX_TREE(tree, GFP_KERNEL);
unsigned order;
unsigned long offset, index;
__order_0_locate_check();
for (order = 0; order < 20; order++) {
for (offset = 0; offset < (1 << (order + 3));
offset += (1UL << order)) {
for (index = 0; index < (1UL << (order + 5));
index += (1UL << order)) {
__locate_check(&tree, index + offset, order);
}
if (find_item(&tree, &tree) != -1)
abort();
item_kill_tree(&tree);
}
}
if (find_item(&tree, &tree) != -1)
abort();
__locate_check(&tree, -1, 0);
if (find_item(&tree, &tree) != -1)
abort();
item_kill_tree(&tree);
}
static void single_thread_tests(bool long_run)
{
int i;
@@ -303,10 +246,6 @@ static void single_thread_tests(bool long_run)
rcu_barrier();
printv(2, "after multiorder_check: %d allocated, preempt %d\n",
nr_allocated, preempt_count);
locate_check();
rcu_barrier();
printv(2, "after locate_check: %d allocated, preempt %d\n",
nr_allocated, preempt_count);
tag_check();
rcu_barrier();
printv(2, "after tag_check: %d allocated, preempt %d\n",
@@ -365,6 +304,7 @@ int main(int argc, char **argv)
rcu_register_thread();
radix_tree_init();
xarray_tests();
regression1_test();
regression2_test();
regression3_test();


@@ -20,230 +20,39 @@
#include "test.h"
#define for_each_index(i, base, order) \
for (i = base; i < base + (1 << order); i++)
static void __multiorder_tag_test(int index, int order)
static int item_insert_order(struct xarray *xa, unsigned long index,
unsigned order)
{
RADIX_TREE(tree, GFP_KERNEL);
int base, err, i;
XA_STATE_ORDER(xas, xa, index, order);
struct item *item = item_create(index, order);
/* our canonical entry */
base = index & ~((1 << order) - 1);
do {
xas_lock(&xas);
xas_store(&xas, item);
xas_unlock(&xas);
} while (xas_nomem(&xas, GFP_KERNEL));
printv(2, "Multiorder tag test with index %d, canonical entry %d\n",
index, base);
if (!xas_error(&xas))
return 0;
err = item_insert_order(&tree, index, order);
assert(!err);
/*
* Verify we get collisions for covered indices. We try and fail to
* insert an exceptional entry so we don't leak memory via
* item_insert_order().
*/
for_each_index(i, base, order) {
err = __radix_tree_insert(&tree, i, order,
(void *)(0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY));
assert(err == -EEXIST);
}
for_each_index(i, base, order) {
assert(!radix_tree_tag_get(&tree, i, 0));
assert(!radix_tree_tag_get(&tree, i, 1));
}
assert(radix_tree_tag_set(&tree, index, 0));
for_each_index(i, base, order) {
assert(radix_tree_tag_get(&tree, i, 0));
assert(!radix_tree_tag_get(&tree, i, 1));
}
assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 1);
assert(radix_tree_tag_clear(&tree, index, 0));
for_each_index(i, base, order) {
assert(!radix_tree_tag_get(&tree, i, 0));
assert(radix_tree_tag_get(&tree, i, 1));
}
assert(radix_tree_tag_clear(&tree, index, 1));
assert(!radix_tree_tagged(&tree, 0));
assert(!radix_tree_tagged(&tree, 1));
item_kill_tree(&tree);
free(item);
return xas_error(&xas);
}
static void __multiorder_tag_test2(unsigned order, unsigned long index2)
void multiorder_iteration(struct xarray *xa)
{
RADIX_TREE(tree, GFP_KERNEL);
unsigned long index = (1 << order);
index2 += index;
assert(item_insert_order(&tree, 0, order) == 0);
assert(item_insert(&tree, index2) == 0);
assert(radix_tree_tag_set(&tree, 0, 0));
assert(radix_tree_tag_set(&tree, index2, 0));
assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 2);
item_kill_tree(&tree);
}
static void multiorder_tag_tests(void)
{
int i, j;
/* test multi-order entry for indices 0-7 with no sibling pointers */
__multiorder_tag_test(0, 3);
__multiorder_tag_test(5, 3);
/* test multi-order entry for indices 8-15 with no sibling pointers */
__multiorder_tag_test(8, 3);
__multiorder_tag_test(15, 3);
/*
* Our order 5 entry covers indices 0-31 in a tree with height=2.
* This is broken up as follows:
* 0-7: canonical entry
* 8-15: sibling 1
* 16-23: sibling 2
* 24-31: sibling 3
*/
__multiorder_tag_test(0, 5);
__multiorder_tag_test(29, 5);
/* same test, but with indices 32-63 */
__multiorder_tag_test(32, 5);
__multiorder_tag_test(44, 5);
/*
* Our order 8 entry covers indices 0-255 in a tree with height=3.
* This is broken up as follows:
* 0-63: canonical entry
* 64-127: sibling 1
* 128-191: sibling 2
* 192-255: sibling 3
*/
__multiorder_tag_test(0, 8);
__multiorder_tag_test(190, 8);
/* same test, but with indices 256-511 */
__multiorder_tag_test(256, 8);
__multiorder_tag_test(300, 8);
__multiorder_tag_test(0x12345678UL, 8);
for (i = 1; i < 10; i++)
for (j = 0; j < (10 << i); j++)
__multiorder_tag_test2(i, j);
}
static void multiorder_check(unsigned long index, int order)
{
unsigned long i;
unsigned long min = index & ~((1UL << order) - 1);
unsigned long max = min + (1UL << order);
void **slot;
struct item *item2 = item_create(min, order);
RADIX_TREE(tree, GFP_KERNEL);
printv(2, "Multiorder index %ld, order %d\n", index, order);
assert(item_insert_order(&tree, index, order) == 0);
for (i = min; i < max; i++) {
struct item *item = item_lookup(&tree, i);
assert(item != 0);
assert(item->index == index);
}
for (i = 0; i < min; i++)
item_check_absent(&tree, i);
for (i = max; i < 2*max; i++)
item_check_absent(&tree, i);
for (i = min; i < max; i++)
assert(radix_tree_insert(&tree, i, item2) == -EEXIST);
slot = radix_tree_lookup_slot(&tree, index);
free(*slot);
radix_tree_replace_slot(&tree, slot, item2);
for (i = min; i < max; i++) {
struct item *item = item_lookup(&tree, i);
assert(item != 0);
assert(item->index == min);
}
assert(item_delete(&tree, min) != 0);
for (i = 0; i < 2*max; i++)
item_check_absent(&tree, i);
}
static void multiorder_shrink(unsigned long index, int order)
{
unsigned long i;
unsigned long max = 1 << order;
RADIX_TREE(tree, GFP_KERNEL);
struct radix_tree_node *node;
printv(2, "Multiorder shrink index %ld, order %d\n", index, order);
assert(item_insert_order(&tree, 0, order) == 0);
node = tree.rnode;
assert(item_insert(&tree, index) == 0);
assert(node != tree.rnode);
assert(item_delete(&tree, index) != 0);
assert(node == tree.rnode);
for (i = 0; i < max; i++) {
struct item *item = item_lookup(&tree, i);
assert(item != 0);
assert(item->index == 0);
}
for (i = max; i < 2*max; i++)
item_check_absent(&tree, i);
if (!item_delete(&tree, 0)) {
printv(2, "failed to delete index %ld (order %d)\n", index, order);
abort();
}
for (i = 0; i < 2*max; i++)
item_check_absent(&tree, i);
}
static void multiorder_insert_bug(void)
{
RADIX_TREE(tree, GFP_KERNEL);
item_insert(&tree, 0);
radix_tree_tag_set(&tree, 0, 0);
item_insert_order(&tree, 3 << 6, 6);
item_kill_tree(&tree);
}
void multiorder_iteration(void)
{
RADIX_TREE(tree, GFP_KERNEL);
struct radix_tree_iter iter;
void **slot;
XA_STATE(xas, xa, 0);
struct item *item;
int i, j, err;
printv(1, "Multiorder iteration test\n");
#define NUM_ENTRIES 11
int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};
printv(1, "Multiorder iteration test\n");
for (i = 0; i < NUM_ENTRIES; i++) {
err = item_insert_order(&tree, index[i], order[i]);
err = item_insert_order(xa, index[i], order[i]);
assert(!err);
}
@@ -252,14 +61,14 @@ void multiorder_iteration(void)
if (j <= (index[i] | ((1 << order[i]) - 1)))
break;
radix_tree_for_each_slot(slot, &tree, &iter, j) {
int height = order[i] / RADIX_TREE_MAP_SHIFT;
int shift = height * RADIX_TREE_MAP_SHIFT;
xas_set(&xas, j);
xas_for_each(&xas, item, ULONG_MAX) {
int height = order[i] / XA_CHUNK_SHIFT;
int shift = height * XA_CHUNK_SHIFT;
unsigned long mask = (1UL << order[i]) - 1;
struct item *item = *slot;
assert((iter.index | mask) == (index[i] | mask));
assert(iter.shift == shift);
assert((xas.xa_index | mask) == (index[i] | mask));
assert(xas.xa_node->shift == shift);
assert(!radix_tree_is_internal_node(item));
assert((item->index | mask) == (index[i] | mask));
assert(item->order == order[i]);
@@ -267,18 +76,15 @@ void multiorder_iteration(void)
}
}
item_kill_tree(&tree);
item_kill_tree(xa);
}
void multiorder_tagged_iteration(void)
void multiorder_tagged_iteration(struct xarray *xa)
{
RADIX_TREE(tree, GFP_KERNEL);
struct radix_tree_iter iter;
void **slot;
XA_STATE(xas, xa, 0);
struct item *item;
int i, j;
printv(1, "Multiorder tagged iteration test\n");
#define MT_NUM_ENTRIES 9
int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};
@@ -286,13 +92,15 @@ void multiorder_tagged_iteration(void)
#define TAG_ENTRIES 7
int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};
for (i = 0; i < MT_NUM_ENTRIES; i++)
assert(!item_insert_order(&tree, index[i], order[i]));
printv(1, "Multiorder tagged iteration test\n");
assert(!radix_tree_tagged(&tree, 1));
for (i = 0; i < MT_NUM_ENTRIES; i++)
assert(!item_insert_order(xa, index[i], order[i]));
assert(!xa_marked(xa, XA_MARK_1));
for (i = 0; i < TAG_ENTRIES; i++)
assert(radix_tree_tag_set(&tree, tag_index[i], 1));
xa_set_mark(xa, tag_index[i], XA_MARK_1);
for (j = 0; j < 256; j++) {
int k;
@@ -304,23 +112,23 @@ void multiorder_tagged_iteration(void)
break;
}
radix_tree_for_each_tagged(slot, &tree, &iter, j, 1) {
xas_set(&xas, j);
xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
unsigned long mask;
struct item *item = *slot;
for (k = i; index[k] < tag_index[i]; k++)
;
mask = (1UL << order[k]) - 1;
assert((iter.index | mask) == (tag_index[i] | mask));
assert(!radix_tree_is_internal_node(item));
assert((xas.xa_index | mask) == (tag_index[i] | mask));
assert(!xa_is_internal(item));
assert((item->index | mask) == (tag_index[i] | mask));
assert(item->order == order[k]);
i++;
}
}
assert(tag_tagged_items(&tree, NULL, 0, ~0UL, TAG_ENTRIES, 1, 2) ==
TAG_ENTRIES);
assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
XA_MARK_2) == TAG_ENTRIES);
for (j = 0; j < 256; j++) {
int mask, k;
@@ -332,297 +140,31 @@ void multiorder_tagged_iteration(void)
break;
}
radix_tree_for_each_tagged(slot, &tree, &iter, j, 2) {
struct item *item = *slot;
xas_set(&xas, j);
xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
for (k = i; index[k] < tag_index[i]; k++)
;
mask = (1 << order[k]) - 1;
assert((iter.index | mask) == (tag_index[i] | mask));
assert(!radix_tree_is_internal_node(item));
assert((xas.xa_index | mask) == (tag_index[i] | mask));
assert(!xa_is_internal(item));
assert((item->index | mask) == (tag_index[i] | mask));
assert(item->order == order[k]);
i++;
}
}
assert(tag_tagged_items(&tree, NULL, 1, ~0UL, MT_NUM_ENTRIES * 2, 1, 0)
== TAG_ENTRIES);
assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
XA_MARK_0) == TAG_ENTRIES);
i = 0;
radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0) {
assert(iter.index == tag_index[i]);
xas_set(&xas, 0);
xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
assert(xas.xa_index == tag_index[i]);
i++;
}
assert(i == TAG_ENTRIES);
item_kill_tree(&tree);
}
/*
* Basic join checks: make sure we can't find an entry in the tree after
* a larger entry has replaced it
*/
static void multiorder_join1(unsigned long index,
unsigned order1, unsigned order2)
{
unsigned long loc;
void *item, *item2 = item_create(index + 1, order1);
RADIX_TREE(tree, GFP_KERNEL);
item_insert_order(&tree, index, order2);
item = radix_tree_lookup(&tree, index);
radix_tree_join(&tree, index + 1, order1, item2);
loc = find_item(&tree, item);
if (loc == -1)
free(item);
item = radix_tree_lookup(&tree, index + 1);
assert(item == item2);
item_kill_tree(&tree);
}
/*
* Check that the accounting of exceptional entries is handled correctly
* by joining an exceptional entry to a normal pointer.
*/
static void multiorder_join2(unsigned order1, unsigned order2)
{
RADIX_TREE(tree, GFP_KERNEL);
struct radix_tree_node *node;
void *item1 = item_create(0, order1);
void *item2;
item_insert_order(&tree, 0, order2);
radix_tree_insert(&tree, 1 << order2, (void *)0x12UL);
item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
assert(item2 == (void *)0x12UL);
assert(node->exceptional == 1);
item2 = radix_tree_lookup(&tree, 0);
free(item2);
radix_tree_join(&tree, 0, order1, item1);
item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
assert(item2 == item1);
assert(node->exceptional == 0);
item_kill_tree(&tree);
}
/*
* This test revealed an accounting bug for exceptional entries at one point.
* Nodes were being freed back into the pool with an elevated exception count
* by radix_tree_join() and then radix_tree_split() was failing to zero the
* count of exceptional entries.
*/
static void multiorder_join3(unsigned int order)
{
RADIX_TREE(tree, GFP_KERNEL);
struct radix_tree_node *node;
void **slot;
struct radix_tree_iter iter;
unsigned long i;
for (i = 0; i < (1 << order); i++) {
radix_tree_insert(&tree, i, (void *)0x12UL);
}
radix_tree_join(&tree, 0, order, (void *)0x16UL);
rcu_barrier();
radix_tree_split(&tree, 0, 0);
radix_tree_for_each_slot(slot, &tree, &iter, 0) {
radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
}
__radix_tree_lookup(&tree, 0, &node, NULL);
assert(node->exceptional == node->count);
item_kill_tree(&tree);
}
static void multiorder_join(void)
{
int i, j, idx;
for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
for (i = 1; i < 15; i++) {
for (j = 0; j < i; j++) {
multiorder_join1(idx, i, j);
}
}
}
for (i = 1; i < 15; i++) {
for (j = 0; j < i; j++) {
multiorder_join2(i, j);
}
}
for (i = 3; i < 10; i++) {
multiorder_join3(i);
}
}
static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
{
struct radix_tree_preload *rtp = &radix_tree_preloads;
if (rtp->nr != 0)
printv(2, "split(%u %u) remaining %u\n", old_order, new_order,
rtp->nr);
/*
* Can't check for equality here as some nodes may have been
* RCU-freed while we ran. But we should never finish with more
* nodes allocated since they should have all been preloaded.
*/
if (nr_allocated > alloc)
printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order,
alloc, nr_allocated);
}
static void __multiorder_split(int old_order, int new_order)
{
RADIX_TREE(tree, GFP_ATOMIC);
void **slot;
struct radix_tree_iter iter;
unsigned alloc;
struct item *item;
radix_tree_preload(GFP_KERNEL);
assert(item_insert_order(&tree, 0, old_order) == 0);
radix_tree_preload_end();
/* Wipe out the preloaded cache or it'll confuse check_mem() */
radix_tree_cpu_dead(0);
item = radix_tree_tag_set(&tree, 0, 2);
radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
alloc = nr_allocated;
radix_tree_split(&tree, 0, new_order);
check_mem(old_order, new_order, alloc);
radix_tree_for_each_slot(slot, &tree, &iter, 0) {
radix_tree_iter_replace(&tree, &iter, slot,
item_create(iter.index, new_order));
}
radix_tree_preload_end();
item_kill_tree(&tree);
free(item);
}
static void __multiorder_split2(int old_order, int new_order)
{
RADIX_TREE(tree, GFP_KERNEL);
void **slot;
struct radix_tree_iter iter;
struct radix_tree_node *node;
void *item;
__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
item = __radix_tree_lookup(&tree, 0, &node, NULL);
assert(item == (void *)0x12);
assert(node->exceptional > 0);
radix_tree_split(&tree, 0, new_order);
radix_tree_for_each_slot(slot, &tree, &iter, 0) {
radix_tree_iter_replace(&tree, &iter, slot,
item_create(iter.index, new_order));
}
item = __radix_tree_lookup(&tree, 0, &node, NULL);
assert(item != (void *)0x12);
assert(node->exceptional == 0);
item_kill_tree(&tree);
}
static void __multiorder_split3(int old_order, int new_order)
{
RADIX_TREE(tree, GFP_KERNEL);
void **slot;
struct radix_tree_iter iter;
struct radix_tree_node *node;
void *item;
__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
item = __radix_tree_lookup(&tree, 0, &node, NULL);
assert(item == (void *)0x12);
assert(node->exceptional > 0);
radix_tree_split(&tree, 0, new_order);
radix_tree_for_each_slot(slot, &tree, &iter, 0) {
radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16);
}
item = __radix_tree_lookup(&tree, 0, &node, NULL);
assert(item == (void *)0x16);
assert(node->exceptional > 0);
item_kill_tree(&tree);
__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
item = __radix_tree_lookup(&tree, 0, &node, NULL);
assert(item == (void *)0x12);
assert(node->exceptional > 0);
radix_tree_split(&tree, 0, new_order);
radix_tree_for_each_slot(slot, &tree, &iter, 0) {
if (iter.index == (1 << new_order))
radix_tree_iter_replace(&tree, &iter, slot,
(void *)0x16);
else
radix_tree_iter_replace(&tree, &iter, slot, NULL);
}
item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL);
assert(item == (void *)0x16);
assert(node->count == node->exceptional);
do {
node = node->parent;
if (!node)
break;
assert(node->count == 1);
assert(node->exceptional == 0);
} while (1);
item_kill_tree(&tree);
}
static void multiorder_split(void)
{
int i, j;
for (i = 3; i < 11; i++)
for (j = 0; j < i; j++) {
__multiorder_split(i, j);
__multiorder_split2(i, j);
__multiorder_split3(i, j);
}
}
static void multiorder_account(void)
{
RADIX_TREE(tree, GFP_KERNEL);
struct radix_tree_node *node;
void **slot;
item_insert_order(&tree, 0, 5);
__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
__radix_tree_lookup(&tree, 0, &node, NULL);
assert(node->count == node->exceptional * 2);
radix_tree_delete(&tree, 1 << 5);
assert(node->exceptional == 0);
__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
__radix_tree_lookup(&tree, 1 << 5, &node, &slot);
assert(node->count == node->exceptional * 2);
__radix_tree_replace(&tree, node, slot, NULL, NULL);
assert(node->exceptional == 0);
item_kill_tree(&tree);
item_kill_tree(xa);
}
bool stop_iteration = false;
@@ -645,68 +187,45 @@ static void *creator_func(void *ptr)
static void *iterator_func(void *ptr)
{
struct radix_tree_root *tree = ptr;
struct radix_tree_iter iter;
XA_STATE(xas, ptr, 0);
struct item *item;
void **slot;
while (!stop_iteration) {
rcu_read_lock();
radix_tree_for_each_slot(slot, tree, &iter, 0) {
item = radix_tree_deref_slot(slot);
if (!item)
xas_for_each(&xas, item, ULONG_MAX) {
if (xas_retry(&xas, item))
continue;
if (radix_tree_deref_retry(item)) {
slot = radix_tree_iter_retry(&iter);
continue;
}
item_sanity(item, iter.index);
item_sanity(item, xas.xa_index);
}
rcu_read_unlock();
}
return NULL;
}
static void multiorder_iteration_race(void)
static void multiorder_iteration_race(struct xarray *xa)
{
const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
pthread_t worker_thread[num_threads];
RADIX_TREE(tree, GFP_KERNEL);
int i;
pthread_create(&worker_thread[0], NULL, &creator_func, &tree);
pthread_create(&worker_thread[0], NULL, &creator_func, xa);
for (i = 1; i < num_threads; i++)
pthread_create(&worker_thread[i], NULL, &iterator_func, &tree);
pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
for (i = 0; i < num_threads; i++)
pthread_join(worker_thread[i], NULL);
item_kill_tree(&tree);
item_kill_tree(xa);
}
static DEFINE_XARRAY(array);
void multiorder_checks(void)
{
int i;
for (i = 0; i < 20; i++) {
multiorder_check(200, i);
multiorder_check(0, i);
multiorder_check((1UL << i) + 1, i);
}
for (i = 0; i < 15; i++)
multiorder_shrink((1UL << (i + RADIX_TREE_MAP_SHIFT)), i);
multiorder_insert_bug();
multiorder_tag_tests();
multiorder_iteration();
multiorder_tagged_iteration();
multiorder_join();
multiorder_split();
multiorder_account();
multiorder_iteration_race();
multiorder_iteration(&array);
multiorder_tagged_iteration(&array);
multiorder_iteration_race(&array);
radix_tree_cpu_dead(0);
}
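
The local item_insert_order() at the top of this file is now the canonical example of creating a multi-index entry with the advanced API: XA_STATE_ORDER() describes an operation covering 2^order consecutive indices, the store happens under the xa_lock, and xas_nomem() allocates memory outside the lock and retries. A stripped-down version, assuming CONFIG_XARRAY_MULTI (store_order is an illustrative name):

static int store_order(struct xarray *xa, unsigned long index,
		       unsigned int order, void *entry)
{
	XA_STATE_ORDER(xas, xa, index, order);

	do {
		xas_lock(&xas);
		/* Occupies (index & ~((1UL << order) - 1)) through +2^order - 1 */
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}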


@@ -44,7 +44,6 @@
#include "regression.h"
static RADIX_TREE(mt_tree, GFP_KERNEL);
static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER;
struct page {
pthread_mutex_t lock;
@@ -53,12 +52,12 @@ struct page {
unsigned long index;
};
static struct page *page_alloc(void)
static struct page *page_alloc(int index)
{
struct page *p;
p = malloc(sizeof(struct page));
p->count = 1;
p->index = 1;
p->index = index;
pthread_mutex_init(&p->lock, NULL);
return p;
@@ -80,53 +79,33 @@ static void page_free(struct page *p)
static unsigned find_get_pages(unsigned long start,
unsigned int nr_pages, struct page **pages)
{
unsigned int i;
unsigned int ret;
unsigned int nr_found;
XA_STATE(xas, &mt_tree, start);
struct page *page;
unsigned int ret = 0;
rcu_read_lock();
restart:
nr_found = radix_tree_gang_lookup_slot(&mt_tree,
(void ***)pages, NULL, start, nr_pages);
ret = 0;
for (i = 0; i < nr_found; i++) {
struct page *page;
repeat:
page = radix_tree_deref_slot((void **)pages[i]);
if (unlikely(!page))
xas_for_each(&xas, page, ULONG_MAX) {
if (xas_retry(&xas, page))
continue;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
/*
* Transient condition which can only trigger
* when entry at index 0 moves out of or back
* to root: none yet gotten, safe to restart.
*/
assert((start | i) == 0);
goto restart;
}
/*
* No exceptional entries are inserted in this test.
*/
assert(0);
}
pthread_mutex_lock(&page->lock);
if (!page->count) {
pthread_mutex_unlock(&page->lock);
goto repeat;
}
if (!page->count)
goto unlock;
/* don't actually update page refcount */
pthread_mutex_unlock(&page->lock);
/* Has the page moved? */
if (unlikely(page != *((void **)pages[i]))) {
goto repeat;
}
if (unlikely(page != xas_reload(&xas)))
goto put_page;
pages[ret] = page;
ret++;
continue;
unlock:
pthread_mutex_unlock(&page->lock);
put_page:
xas_reset(&xas);
}
rcu_read_unlock();
return ret;
@@ -145,30 +124,30 @@ static void *regression1_fn(void *arg)
for (j = 0; j < 1000000; j++) {
struct page *p;
p = page_alloc();
pthread_mutex_lock(&mt_lock);
p = page_alloc(0);
xa_lock(&mt_tree);
radix_tree_insert(&mt_tree, 0, p);
pthread_mutex_unlock(&mt_lock);
xa_unlock(&mt_tree);
p = page_alloc();
pthread_mutex_lock(&mt_lock);
p = page_alloc(1);
xa_lock(&mt_tree);
radix_tree_insert(&mt_tree, 1, p);
pthread_mutex_unlock(&mt_lock);
xa_unlock(&mt_tree);
pthread_mutex_lock(&mt_lock);
xa_lock(&mt_tree);
p = radix_tree_delete(&mt_tree, 1);
pthread_mutex_lock(&p->lock);
p->count--;
pthread_mutex_unlock(&p->lock);
pthread_mutex_unlock(&mt_lock);
xa_unlock(&mt_tree);
page_free(p);
pthread_mutex_lock(&mt_lock);
xa_lock(&mt_tree);
p = radix_tree_delete(&mt_tree, 0);
pthread_mutex_lock(&p->lock);
p->count--;
pthread_mutex_unlock(&p->lock);
pthread_mutex_unlock(&mt_lock);
xa_unlock(&mt_tree);
page_free(p);
}
} else {
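
The rewritten find_get_pages() above is the page-cache lookup idiom in miniature: find the entry under RCU, take a reference, then use xas_reload() to confirm the entry is still current, and xas_reset() to drop stale iterator state when it is not. A condensed sketch (try_get_ref and put_ref are hypothetical stand-ins for the test's locked refcount check):

static bool try_get_ref(struct page *page);
static void put_ref(struct page *page);

static unsigned int lookup_pages(struct xarray *xa, unsigned long start,
				 unsigned int nr, struct page **pages)
{
	XA_STATE(xas, xa, start);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, ULONG_MAX) {
		if (xas_retry(&xas, page))
			continue;
		if (!try_get_ref(page))		/* entry is being freed */
			goto reset;
		if (unlikely(page != xas_reload(&xas))) {
			put_ref(page);		/* lost a race with removal */
			goto reset;
		}
		pages[ret++] = page;
		if (ret == nr)
			break;
		continue;
reset:
		xas_reset(&xas);
	}
	rcu_read_unlock();
	return ret;
}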


@@ -53,9 +53,9 @@
#include "regression.h"
#include "test.h"
#define PAGECACHE_TAG_DIRTY 0
#define PAGECACHE_TAG_WRITEBACK 1
#define PAGECACHE_TAG_TOWRITE 2
#define PAGECACHE_TAG_DIRTY XA_MARK_0
#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
#define PAGECACHE_TAG_TOWRITE XA_MARK_2
static RADIX_TREE(mt_tree, GFP_KERNEL);
unsigned long page_count = 0;
@@ -92,7 +92,7 @@ void regression2_test(void)
/* 1. */
start = 0;
end = max_slots - 2;
tag_tagged_items(&mt_tree, NULL, start, end, 1,
tag_tagged_items(&mt_tree, start, end, 1,
PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
/* 2. */


@@ -69,21 +69,6 @@ void regression3_test(void)
continue;
}
}
radix_tree_delete(&root, 1);
first = true;
radix_tree_for_each_contig(slot, &root, &iter, 0) {
printv(2, "contig %ld %p\n", iter.index, *slot);
if (first) {
radix_tree_insert(&root, 1, ptr);
first = false;
}
if (radix_tree_deref_retry(*slot)) {
printv(2, "retry at %ld\n", iter.index);
slot = radix_tree_iter_retry(&iter);
continue;
}
}
radix_tree_for_each_slot(slot, &root, &iter, 0) {
printv(2, "slot %ld %p\n", iter.index, *slot);
@@ -93,14 +78,6 @@ void regression3_test(void)
}
}
radix_tree_for_each_contig(slot, &root, &iter, 0) {
printv(2, "contig %ld %p\n", iter.index, *slot);
if (!iter.index) {
printv(2, "next at %ld\n", iter.index);
slot = radix_tree_iter_resume(slot, &iter);
}
}
radix_tree_tag_set(&root, 0, 0);
radix_tree_tag_set(&root, 1, 0);
radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {


@@ -24,7 +24,7 @@ __simple_checks(struct radix_tree_root *tree, unsigned long index, int tag)
item_tag_set(tree, index, tag);
ret = item_tag_get(tree, index, tag);
assert(ret != 0);
ret = tag_tagged_items(tree, NULL, first, ~0UL, 10, tag, !tag);
ret = tag_tagged_items(tree, first, ~0UL, 10, tag, !tag);
assert(ret == 1);
ret = item_tag_get(tree, index, !tag);
assert(ret != 0);
@@ -321,7 +321,7 @@ static void single_check(void)
assert(ret == 0);
verify_tag_consistency(&tree, 0);
verify_tag_consistency(&tree, 1);
ret = tag_tagged_items(&tree, NULL, first, 10, 10, 0, 1);
ret = tag_tagged_items(&tree, first, 10, 10, XA_MARK_0, XA_MARK_1);
assert(ret == 1);
ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 0, BATCH, 1);
assert(ret == 1);
@@ -331,34 +331,6 @@ static void single_check(void)
item_kill_tree(&tree);
}
void radix_tree_clear_tags_test(void)
{
unsigned long index;
struct radix_tree_node *node;
struct radix_tree_iter iter;
void **slot;
RADIX_TREE(tree, GFP_KERNEL);
item_insert(&tree, 0);
item_tag_set(&tree, 0, 0);
__radix_tree_lookup(&tree, 0, &node, &slot);
radix_tree_clear_tags(&tree, node, slot);
assert(item_tag_get(&tree, 0, 0) == 0);
for (index = 0; index < 1000; index++) {
item_insert(&tree, index);
item_tag_set(&tree, index, 0);
}
radix_tree_for_each_slot(slot, &tree, &iter, 0) {
radix_tree_clear_tags(&tree, iter.node, slot);
assert(item_tag_get(&tree, iter.index, 0) == 0);
}
item_kill_tree(&tree);
}
void tag_check(void)
{
single_check();
@@ -376,5 +348,4 @@ void tag_check(void)
thrash_tags();
rcu_barrier();
printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
radix_tree_clear_tags_test();
}


@@ -25,11 +25,6 @@ int item_tag_get(struct radix_tree_root *root, unsigned long index, int tag)
return radix_tree_tag_get(root, index, tag);
}
int __item_insert(struct radix_tree_root *root, struct item *item)
{
return __radix_tree_insert(root, item->index, item->order, item);
}
struct item *item_create(unsigned long index, unsigned int order)
{
struct item *ret = malloc(sizeof(*ret));
@@ -39,21 +34,15 @@ struct item *item_create(unsigned long index, unsigned int order)
return ret;
}
int item_insert_order(struct radix_tree_root *root, unsigned long index,
unsigned order)
int item_insert(struct radix_tree_root *root, unsigned long index)
{
struct item *item = item_create(index, order);
int err = __item_insert(root, item);
struct item *item = item_create(index, 0);
int err = radix_tree_insert(root, item->index, item);
if (err)
free(item);
return err;
}
int item_insert(struct radix_tree_root *root, unsigned long index)
{
return item_insert_order(root, index, 0);
}
void item_sanity(struct item *item, unsigned long index)
{
unsigned long mask;
@@ -63,16 +52,21 @@ void item_sanity(struct item *item, unsigned long index)
assert((item->index | mask) == (index | mask));
}
void item_free(struct item *item, unsigned long index)
{
item_sanity(item, index);
free(item);
}
int item_delete(struct radix_tree_root *root, unsigned long index)
{
struct item *item = radix_tree_delete(root, index);
if (item) {
item_sanity(item, index);
free(item);
return 1;
}
return 0;
if (!item)
return 0;
item_free(item, index);
return 1;
}
static void item_free_rcu(struct rcu_head *head)
@@ -82,9 +76,9 @@ static void item_free_rcu(struct rcu_head *head)
free(item);
}
int item_delete_rcu(struct radix_tree_root *root, unsigned long index)
int item_delete_rcu(struct xarray *xa, unsigned long index)
{
struct item *item = radix_tree_delete(root, index);
struct item *item = xa_erase(xa, index);
if (item) {
item_sanity(item, index);
@@ -176,61 +170,32 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start,
}
/* Use the same pattern as tag_pages_for_writeback() in mm/page-writeback.c */
int tag_tagged_items(struct radix_tree_root *root, pthread_mutex_t *lock,
unsigned long start, unsigned long end, unsigned batch,
unsigned iftag, unsigned thentag)
int tag_tagged_items(struct xarray *xa, unsigned long start, unsigned long end,
unsigned batch, xa_mark_t iftag, xa_mark_t thentag)
{
unsigned long tagged = 0;
struct radix_tree_iter iter;
void **slot;
XA_STATE(xas, xa, start);
unsigned int tagged = 0;
struct item *item;
if (batch == 0)
batch = 1;
if (lock)
pthread_mutex_lock(lock);
radix_tree_for_each_tagged(slot, root, &iter, start, iftag) {
if (iter.index > end)
break;
radix_tree_iter_tag_set(root, &iter, thentag);
tagged++;
if ((tagged % batch) != 0)
xas_lock_irq(&xas);
xas_for_each_marked(&xas, item, end, iftag) {
xas_set_mark(&xas, thentag);
if (++tagged % batch)
continue;
slot = radix_tree_iter_resume(slot, &iter);
if (lock) {
pthread_mutex_unlock(lock);
rcu_barrier();
pthread_mutex_lock(lock);
}
xas_pause(&xas);
xas_unlock_irq(&xas);
rcu_barrier();
xas_lock_irq(&xas);
}
if (lock)
pthread_mutex_unlock(lock);
xas_unlock_irq(&xas);
return tagged;
}
/* Use the same pattern as find_swap_entry() in mm/shmem.c */
unsigned long find_item(struct radix_tree_root *root, void *item)
{
struct radix_tree_iter iter;
void **slot;
unsigned long found = -1;
unsigned long checked = 0;
radix_tree_for_each_slot(slot, root, &iter, 0) {
if (*slot == item) {
found = iter.index;
break;
}
checked++;
if ((checked % 4) != 0)
continue;
slot = radix_tree_iter_resume(slot, &iter);
}
return found;
}
static int verify_node(struct radix_tree_node *slot, unsigned int tag,
int tagged)
{
@@ -281,43 +246,31 @@ static int verify_node(struct radix_tree_node *slot, unsigned int tag,
void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag)
{
struct radix_tree_node *node = root->rnode;
struct radix_tree_node *node = root->xa_head;
if (!radix_tree_is_internal_node(node))
return;
verify_node(node, tag, !!root_tag_get(root, tag));
}
void item_kill_tree(struct radix_tree_root *root)
void item_kill_tree(struct xarray *xa)
{
struct radix_tree_iter iter;
void **slot;
struct item *items[32];
int nfound;
XA_STATE(xas, xa, 0);
void *entry;
radix_tree_for_each_slot(slot, root, &iter, 0) {
if (radix_tree_exceptional_entry(*slot))
radix_tree_delete(root, iter.index);
}
while ((nfound = radix_tree_gang_lookup(root, (void **)items, 0, 32))) {
int i;
for (i = 0; i < nfound; i++) {
void *ret;
ret = radix_tree_delete(root, items[i]->index);
assert(ret == items[i]);
free(items[i]);
xas_for_each(&xas, entry, ULONG_MAX) {
if (!xa_is_value(entry)) {
item_free(entry, xas.xa_index);
}
xas_store(&xas, NULL);
}
assert(radix_tree_gang_lookup(root, (void **)items, 0, 32) == 0);
assert(root->rnode == NULL);
assert(xa_empty(xa));
}
void tree_verify_min_height(struct radix_tree_root *root, int maxindex)
{
unsigned shift;
struct radix_tree_node *node = root->rnode;
struct radix_tree_node *node = root->xa_head;
if (!radix_tree_is_internal_node(node)) {
assert(maxindex == 0);
return;
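
A small usage sketch of the new tag_tagged_items() signature, built from this suite's own helpers (demo_array and the index are arbitrary; the array is assumed empty beforehand):

static DEFINE_XARRAY(demo_array);

static void tag_copy_demo(void)
{
	xa_store(&demo_array, 4, item_create(4, 0), GFP_KERNEL);
	xa_set_mark(&demo_array, 4, XA_MARK_0);

	/* Copy XA_MARK_0 to XA_MARK_1 across the whole index range. */
	assert(tag_tagged_items(&demo_array, 0, ULONG_MAX, 10,
				XA_MARK_0, XA_MARK_1) == 1);
	assert(xa_marked(&demo_array, XA_MARK_1));

	item_kill_tree(&demo_array);
}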


@@ -11,13 +11,11 @@ struct item {
};
struct item *item_create(unsigned long index, unsigned int order);
int __item_insert(struct radix_tree_root *root, struct item *item);
int item_insert(struct radix_tree_root *root, unsigned long index);
void item_sanity(struct item *item, unsigned long index);
int item_insert_order(struct radix_tree_root *root, unsigned long index,
unsigned order);
void item_free(struct item *item, unsigned long index);
int item_delete(struct radix_tree_root *root, unsigned long index);
int item_delete_rcu(struct radix_tree_root *root, unsigned long index);
int item_delete_rcu(struct xarray *xa, unsigned long index);
struct item *item_lookup(struct radix_tree_root *root, unsigned long index);
void item_check_present(struct radix_tree_root *root, unsigned long index);
@@ -29,11 +27,10 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start,
unsigned long nr, int chunk);
void item_kill_tree(struct radix_tree_root *root);
int tag_tagged_items(struct radix_tree_root *, pthread_mutex_t *,
unsigned long start, unsigned long end, unsigned batch,
unsigned iftag, unsigned thentag);
unsigned long find_item(struct radix_tree_root *, void *item);
int tag_tagged_items(struct xarray *, unsigned long start, unsigned long end,
unsigned batch, xa_mark_t iftag, xa_mark_t thentag);
void xarray_tests(void);
void tag_check(void);
void multiorder_checks(void);
void iteration_test(unsigned order, unsigned duration);


@@ -0,0 +1,35 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* xarray.c: Userspace shim for XArray test-suite
* Copyright (c) 2018 Matthew Wilcox <willy@infradead.org>
*/
#define XA_DEBUG
#include "test.h"
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_LICENSE(x)
#define dump_stack() assert(0)
#include "../../../lib/xarray.c"
#undef XA_DEBUG
#include "../../../lib/test_xarray.c"
void xarray_tests(void)
{
xarray_checks();
xarray_exit();
}
int __weak main(void)
{
radix_tree_init();
xarray_tests();
radix_tree_cpu_dead(1);
rcu_barrier();
if (nr_allocated)
printf("nr_allocated = %d\n", nr_allocated);
return 0;
}
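
Declaring main() __weak lets this one file serve both targets from the Makefile change above: linked into the combined test runner, the runner's own main() takes precedence and calls xarray_tests() alongside the other checks, while the standalone xarray binary falls back to this main(), runs the checks, and reports any leaked allocations through nr_allocated.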