  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * multiorder.c: Multi-order radix tree entry testing
  4. * Copyright (c) 2016 Intel Corporation
  5. * Author: Ross Zwisler <[email protected]>
  6. * Author: Matthew Wilcox <[email protected]>
  7. */
  8. #include <linux/radix-tree.h>
  9. #include <linux/slab.h>
  10. #include <linux/errno.h>
  11. #include <pthread.h>
  12. #include "test.h"
  13. static int item_insert_order(struct xarray *xa, unsigned long index,
  14. unsigned order)
  15. {
  16. XA_STATE_ORDER(xas, xa, index, order);
  17. struct item *item = item_create(index, order);
  18. do {
  19. xas_lock(&xas);
  20. xas_store(&xas, item);
  21. xas_unlock(&xas);
  22. } while (xas_nomem(&xas, GFP_KERNEL));
  23. if (!xas_error(&xas))
  24. return 0;
  25. free(item);
  26. return xas_error(&xas);
  27. }
/*
 * Insert a fixed set of multi-order entries, then verify that an
 * unmarked iteration started from every index 0..255 visits exactly
 * the entries whose ranges end at or after the start index, reporting
 * the expected index, order and node shift for each.
 */
void multiorder_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j, err;

#define NUM_ENTRIES 11
	/* index[i] is entry i's base index; order[i] its order (range 2^order). */
	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};

	printv(1, "Multiorder iteration test\n");

	for (i = 0; i < NUM_ENTRIES; i++) {
		err = item_insert_order(xa, index[i], order[i]);
		assert(!err);
	}

	for (j = 0; j < 256; j++) {
		/*
		 * Find the first entry whose covered range still includes or
		 * follows j; iteration from j must begin with that entry.
		 * (index[i] | ((1 << order[i]) - 1)) is the entry's last index.
		 */
		for (i = 0; i < NUM_ENTRIES; i++)
			if (j <= (index[i] | ((1 << order[i]) - 1)))
				break;

		xas_set(&xas, j);
		xas_for_each(&xas, item, ULONG_MAX) {
			/* Expected node shift for an entry of this order. */
			int height = order[i] / XA_CHUNK_SHIFT;
			int shift = height * XA_CHUNK_SHIFT;
			unsigned long mask = (1UL << order[i]) - 1;

			/* Reported index must lie within entry i's range. */
			assert((xas.xa_index | mask) == (index[i] | mask));
			assert(xas.xa_node->shift == shift);
			assert(!radix_tree_is_internal_node(item));
			assert((item->index | mask) == (index[i] | mask));
			assert(item->order == order[i]);
			i++;
		}
	}

	item_kill_tree(xa);
}
/*
 * Like multiorder_iteration(), but only a subset of the entries is
 * marked, and the iteration is restricted to marked entries.  Also
 * exercises tag_tagged_items() by copying XA_MARK_1 to XA_MARK_2 and
 * then (range-limited, so index 0 is excluded) to XA_MARK_0.
 */
void multiorder_tagged_iteration(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	struct item *item;
	int i, j;

#define MT_NUM_ENTRIES 9
	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};

	/* tag_index[] is the subset of index[] that gets marked. */
#define TAG_ENTRIES 7
	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};

	printv(1, "Multiorder tagged iteration test\n");

	for (i = 0; i < MT_NUM_ENTRIES; i++)
		assert(!item_insert_order(xa, index[i], order[i]));

	assert(!xa_marked(xa, XA_MARK_1));

	for (i = 0; i < TAG_ENTRIES; i++)
		xa_set_mark(xa, tag_index[i], XA_MARK_1);

	for (j = 0; j < 256; j++) {
		int k;

		/*
		 * i indexes tag_index[]; k resynchronizes into index[]/order[]
		 * to find the order of the tagged entry (k >= i always holds
		 * since tag_index[] is a subsequence of index[]).
		 */
		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
			unsigned long mask;
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1UL << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	/* Copy every XA_MARK_1 to XA_MARK_2; all TAG_ENTRIES must transfer. */
	assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
				XA_MARK_2) == TAG_ENTRIES);

	for (j = 0; j < 256; j++) {
		/* NOTE(review): int mask here vs unsigned long above — safe
		 * while the maximum order is 7, but inconsistent. */
		int mask, k;

		for (i = 0; i < TAG_ENTRIES; i++) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			if (j <= (index[k] | ((1 << order[k]) - 1)))
				break;
		}

		xas_set(&xas, j);
		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
			for (k = i; index[k] < tag_index[i]; k++)
				;
			mask = (1 << order[k]) - 1;

			assert((xas.xa_index | mask) == (tag_index[i] | mask));
			assert(!xa_is_internal(item));
			assert((item->index | mask) == (tag_index[i] | mask));
			assert(item->order == order[k]);
			i++;
		}
	}

	/* Range starts at 1, so the entry at index 0 is skipped — still
	 * TAG_ENTRIES transfers because entry 0 spans indices 0-1. */
	assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
				XA_MARK_0) == TAG_ENTRIES);

	i = 0;
	xas_set(&xas, 0);
	xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
		assert(xas.xa_index == tag_index[i]);
		i++;
	}
	assert(i == TAG_ENTRIES);

	item_kill_tree(xa);
}
/*
 * Set by the creator thread to tell the iterator/load threads to exit.
 * NOTE(review): read and written from multiple threads without atomics
 * or barriers — a data race under C11; consider atomic_bool. TODO confirm
 * whether the test harness deliberately tolerates this.
 */
bool stop_iteration;
  131. static void *creator_func(void *ptr)
  132. {
  133. /* 'order' is set up to ensure we have sibling entries */
  134. unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
  135. struct radix_tree_root *tree = ptr;
  136. int i;
  137. for (i = 0; i < 10000; i++) {
  138. item_insert_order(tree, 0, order);
  139. item_delete_rcu(tree, 0);
  140. }
  141. stop_iteration = true;
  142. return NULL;
  143. }
/*
 * Reader side of multiorder_iteration_race(): walk the tree under RCU,
 * sanity-checking every entry, until creator_func() sets stop_iteration.
 * NOTE(review): the xa_state is declared outside the while loop, so each
 * pass appears to resume from where the previous walk ended rather than
 * restarting at index 0 — presumably intentional, verify against the
 * xas_for_each() contract.
 */
static void *iterator_func(void *ptr)
{
	XA_STATE(xas, ptr, 0);
	struct item *item;

	while (!stop_iteration) {
		rcu_read_lock();
		xas_for_each(&xas, item, ULONG_MAX) {
			/* Entries may be mid-update; retry instead of checking. */
			if (xas_retry(&xas, item))
				continue;
			item_sanity(item, xas.xa_index);
		}
		rcu_read_unlock();
	}
	return NULL;
}
  159. static void multiorder_iteration_race(struct xarray *xa)
  160. {
  161. const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
  162. pthread_t worker_thread[num_threads];
  163. int i;
  164. stop_iteration = false;
  165. pthread_create(&worker_thread[0], NULL, &creator_func, xa);
  166. for (i = 1; i < num_threads; i++)
  167. pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
  168. for (i = 0; i < num_threads; i++)
  169. pthread_join(worker_thread[i], NULL);
  170. item_kill_tree(xa);
  171. }
/*
 * Creator side of load_race(): repeatedly insert and delete multi-order
 * entries whose ranges end just below 3 << RADIX_TREE_MAP_SHIFT, while
 * load_worker() threads concurrently look up the last index in that
 * range.  Signals the workers to stop when the churn is finished.
 */
static void *load_creator(void *ptr)
{
	/* 'order' is set up to ensure we have sibling entries */
	unsigned int order;
	struct radix_tree_root *tree = ptr;
	int i;

	rcu_register_thread();
	/* Two stable order-0 entries bracketing the churned region. */
	item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
	item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
	for (i = 0; i < 10000; i++) {
		for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
			/* Entry of 2^order slots ending exactly at
			 * 3 << RADIX_TREE_MAP_SHIFT. */
			unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
						(1 << order);
			item_insert_order(tree, index, order);
			item_delete_rcu(tree, index);
		}
	}
	rcu_unregister_thread();

	/* Release the load_worker() threads. */
	stop_iteration = true;
	return NULL;
}
  193. static void *load_worker(void *ptr)
  194. {
  195. unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
  196. rcu_register_thread();
  197. while (!stop_iteration) {
  198. struct item *item = xa_load(ptr, index);
  199. assert(!xa_is_internal(item));
  200. }
  201. rcu_unregister_thread();
  202. return NULL;
  203. }
  204. static void load_race(struct xarray *xa)
  205. {
  206. const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
  207. pthread_t worker_thread[num_threads];
  208. int i;
  209. stop_iteration = false;
  210. pthread_create(&worker_thread[0], NULL, &load_creator, xa);
  211. for (i = 1; i < num_threads; i++)
  212. pthread_create(&worker_thread[i], NULL, &load_worker, xa);
  213. for (i = 0; i < num_threads; i++)
  214. pthread_join(worker_thread[i], NULL);
  215. item_kill_tree(xa);
  216. }
/* The shared XArray exercised (and emptied) by each check below. */
static DEFINE_XARRAY(array);
/*
 * Entry point: run each multi-order test in turn against the shared
 * array.  Each test empties the array before returning, so they are
 * independent.  The final radix_tree_cpu_dead(0) call exercises the
 * CPU-dead callback, freeing CPU 0's preallocated nodes.
 */
void multiorder_checks(void)
{
	multiorder_iteration(&array);
	multiorder_tagged_iteration(&array);
	multiorder_iteration_race(&array);
	load_race(&array);

	radix_tree_cpu_dead(0);
}
  226. int __weak main(int argc, char **argv)
  227. {
  228. int opt;
  229. while ((opt = getopt(argc, argv, "ls:v")) != -1) {
  230. if (opt == 'v')
  231. test_verbose++;
  232. }
  233. rcu_register_thread();
  234. radix_tree_init();
  235. multiorder_checks();
  236. rcu_unregister_thread();
  237. return 0;
  238. }