// SPDX-License-Identifier: GPL-2.0-only
/*
 * lib/btree.c - Simple In-memory B+Tree
 *
 * Copyright (c) 2007-2008 Joern Engel <[email protected]>
 * Bits and pieces stolen from Peter Zijlstra's code, which is
 * Copyright 2007, Red Hat Inc. Peter Zijlstra
 *
 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
 *
 * A relatively simple B+Tree implementation.  I have written it as a learning
 * exercise to understand how B+Trees work.  Turned out to be useful as well.
 *
 * B+Trees can be used similarly to Linux radix trees (which don't have
 * anything in common with textbook radix trees, beware).  A prerequisite for
 * them working well is that access to a random tree node is much faster than
 * a large number of operations within each node.
 *
 * Disks have fulfilled the prerequisite for a long time.  More recently DRAM
 * has gained similar properties, as memory access times, when measured in cpu
 * cycles, have increased.  Cacheline sizes have increased as well, which also
 * helps B+Trees.
 *
 * Compared to radix trees, B+Trees are more efficient when dealing with a
 * sparsely populated address space.  Between 25% and 50% of the memory is
 * occupied by valid pointers.  When densely populated, radix trees contain
 * ~98% pointers - hard to beat.  Very sparse radix trees contain only ~2%
 * pointers.
 *
 * This particular implementation stores pointers identified by a long value.
 * Storing NULL pointers is illegal; lookup will return NULL when no entry
 * was found.
 *
 * A trick is used that is not commonly found in textbooks: the lowest
 * values are to the right, not to the left.  All used slots within a node
 * are on the left, all unused slots contain NUL values.  Most operations
 * simply loop once over all slots and terminate on the first NUL.
 */

#include <linux/btree.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NODESIZE MAX(L1_CACHE_BYTES, 128)

struct btree_geo {
	int keylen;
	int no_pairs;
	int no_longs;
};

struct btree_geo btree_geo32 = {
	.keylen = 1,
	.no_pairs = NODESIZE / sizeof(long) / 2,
	.no_longs = NODESIZE / sizeof(long) / 2,
};
EXPORT_SYMBOL_GPL(btree_geo32);

#define LONG_PER_U64 (64 / BITS_PER_LONG)

struct btree_geo btree_geo64 = {
	.keylen = LONG_PER_U64,
	.no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
	.no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo64);

struct btree_geo btree_geo128 = {
	.keylen = 2 * LONG_PER_U64,
	.no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
	.no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
};
EXPORT_SYMBOL_GPL(btree_geo128);
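
/*
 * Example of how the geometries work out on a 64-bit machine with 64-byte
 * cachelines (NODESIZE == 128, sizeof(long) == 8, LONG_PER_U64 == 1):
 *
 *	btree_geo32:  keylen = 1, no_pairs = 128/8/2 = 8, no_longs = 8
 *	btree_geo64:  keylen = 1, no_pairs = 128/8/2 = 8, no_longs = 8
 *	btree_geo128: keylen = 2, no_pairs = 128/8/3 = 5, no_longs = 10
 *
 * Every node is a single NODESIZE-byte allocation holding no_longs key
 * words followed by no_pairs value slots (one long may be left unused,
 * as in the 128-bit case above).
 */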

#define MAX_KEYLEN	(2 * LONG_PER_U64)

static struct kmem_cache *btree_cachep;

void *btree_alloc(gfp_t gfp_mask, void *pool_data)
{
	return kmem_cache_alloc(btree_cachep, gfp_mask);
}
EXPORT_SYMBOL_GPL(btree_alloc);

void btree_free(void *element, void *pool_data)
{
	kmem_cache_free(btree_cachep, element);
}
EXPORT_SYMBOL_GPL(btree_free);

static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
{
	unsigned long *node;

	node = mempool_alloc(head->mempool, gfp);
	if (likely(node))
		memset(node, 0, NODESIZE);
	return node;
}

static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (l1[i] < l2[i])
			return -1;
		if (l1[i] > l2[i])
			return 1;
	}
	return 0;
}

static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
		size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		dest[i] = src[i];
	return dest;
}

static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		s[i] = c;
	return s;
}

static void dec_key(struct btree_geo *geo, unsigned long *key)
{
	unsigned long val;
	int i;

	for (i = geo->keylen - 1; i >= 0; i--) {
		val = key[i];
		key[i] = val - 1;
		if (val)
			break;
	}
}
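
/*
 * Worked example for dec_key() with a two-word key (btree_geo128 on a
 * 64-bit machine, most significant word first): decrementing {0x1, 0x0}
 * gives {0x0, ~0UL} - the low word wraps and the borrow propagates into
 * the high word.  An all-zero key wraps around to all ones, which is why
 * btree_get_prev() checks keyzero() before calling this.
 */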

static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
{
	return &node[n * geo->keylen];
}

static void *bval(struct btree_geo *geo, unsigned long *node, int n)
{
	return (void *)node[geo->no_longs + n];
}

static void setkey(struct btree_geo *geo, unsigned long *node, int n,
		   unsigned long *key)
{
	longcpy(bkey(geo, node, n), key, geo->keylen);
}

static void setval(struct btree_geo *geo, unsigned long *node, int n,
		   void *val)
{
	node[geo->no_longs + n] = (unsigned long) val;
}

static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
{
	longset(bkey(geo, node, n), 0, geo->keylen);
	node[geo->no_longs + n] = 0;
}
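
/*
 * Node layout illustration for btree_geo128 on a 64-bit machine
 * (keylen = 2, no_pairs = 5, no_longs = 10):
 *
 *	node[0..1]	key of slot 0 (the largest key in the node)
 *	node[2..3]	key of slot 1
 *	...
 *	node[8..9]	key of slot 4 (the smallest key in the node)
 *	node[10..14]	values of slots 0..4
 *	node[15]	unused
 *
 * bkey() returns &node[n * keylen] and bval() returns node[no_longs + n],
 * so keys and values live in the same allocation.
 */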

static inline void __btree_init(struct btree_head *head)
{
	head->node = NULL;
	head->height = 0;
}

void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
{
	__btree_init(head);
	head->mempool = mempool;
}
EXPORT_SYMBOL_GPL(btree_init_mempool);

int btree_init(struct btree_head *head)
{
	__btree_init(head);
	head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
	if (!head->mempool)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(btree_init);

void btree_destroy(struct btree_head *head)
{
	mempool_free(head->node, head->mempool);
	mempool_destroy(head->mempool);
	head->mempool = NULL;
}
EXPORT_SYMBOL_GPL(btree_destroy);
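
/*
 * Minimal usage sketch for the core API, assuming the 32-bit geometry and
 * GFP_KERNEL context (my_tree, my_object and the out label are made-up
 * names, error handling is trimmed):
 *
 *	static struct btree_head my_tree;
 *
 *	unsigned long key = 42;
 *	void *val;
 *	int err;
 *
 *	err = btree_init(&my_tree);
 *	if (err)
 *		return err;
 *	err = btree_insert(&my_tree, &btree_geo32, &key, my_object, GFP_KERNEL);
 *	if (err)
 *		goto out;
 *	val = btree_lookup(&my_tree, &btree_geo32, &key);
 *	...
 * out:
 *	btree_destroy(&my_tree);
 *
 * Most in-tree users do not pass a struct btree_geo explicitly but go
 * through the typed helpers that <linux/btree.h> builds on top of these
 * primitives.
 */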

void *btree_last(struct btree_head *head, struct btree_geo *geo,
		 unsigned long *key)
{
	int height = head->height;
	unsigned long *node = head->node;

	if (height == 0)
		return NULL;

	for ( ; height > 1; height--)
		node = bval(geo, node, 0);

	longcpy(key, bkey(geo, node, 0), geo->keylen);
	return bval(geo, node, 0);
}
EXPORT_SYMBOL_GPL(btree_last);
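
/*
 * Because keys within a node are stored in descending order and packed to
 * the left, slot 0 always holds a node's largest key.  btree_last() simply
 * follows slot 0 down to the leaf level, so it returns the entry with the
 * highest key in the tree (and copies that key back to the caller).
 */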

static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
		  unsigned long *key)
{
	return longcmp(bkey(geo, node, pos), key, geo->keylen);
}

static int keyzero(struct btree_geo *geo, unsigned long *key)
{
	int i;

	for (i = 0; i < geo->keylen; i++)
		if (key[i])
			return 0;

	return 1;
}

static void *btree_lookup_node(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	int i, height = head->height;
	unsigned long *node = head->node;

	if (height == 0)
		return NULL;

	for ( ; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			return NULL;
		node = bval(geo, node, i);
		if (!node)
			return NULL;
	}
	return node;
}

void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	int i;
	unsigned long *node;

	node = btree_lookup_node(head, geo, key);
	if (!node)
		return NULL;

	for (i = 0; i < geo->no_pairs; i++)
		if (keycmp(geo, node, i, key) == 0)
			return bval(geo, node, i);
	return NULL;
}
EXPORT_SYMBOL_GPL(btree_lookup);

int btree_update(struct btree_head *head, struct btree_geo *geo,
		 unsigned long *key, void *val)
{
	int i;
	unsigned long *node;

	node = btree_lookup_node(head, geo, key);
	if (!node)
		return -ENOENT;

	for (i = 0; i < geo->no_pairs; i++)
		if (keycmp(geo, node, i, key) == 0) {
			setval(geo, node, i, val);
			return 0;
		}
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(btree_update);

/*
 * Usually this function is quite similar to normal lookup.  But a key in a
 * parent node may be smaller than the smallest key actually stored in the
 * child it points to (the parent key is only a lower bound on the keys
 * below it, not an exact minimum).  In that case we cannot just return
 * NULL, as we have only proven that no key smaller than __key but larger
 * than this parent key exists.  So we set __key to the parent key and
 * retry.  We have to use the smallest such parent key, which is the last
 * parent key we encountered.
 */
void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
		     unsigned long *__key)
{
	int i, height;
	unsigned long *node, *oldnode;
	unsigned long *retry_key = NULL, key[MAX_KEYLEN];

	if (keyzero(geo, __key))
		return NULL;

	if (head->height == 0)
		return NULL;
	longcpy(key, __key, geo->keylen);
retry:
	dec_key(geo, key);

	node = head->node;
	for (height = head->height ; height > 1; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;
		if (i == geo->no_pairs)
			goto miss;
		oldnode = node;
		node = bval(geo, node, i);
		if (!node)
			goto miss;
		retry_key = bkey(geo, oldnode, i);
	}

	if (!node)
		goto miss;

	for (i = 0; i < geo->no_pairs; i++) {
		if (keycmp(geo, node, i, key) <= 0) {
			if (bval(geo, node, i)) {
				longcpy(__key, bkey(geo, node, i), geo->keylen);
				return bval(geo, node, i);
			} else
				goto miss;
		}
	}
miss:
	if (retry_key) {
		longcpy(key, retry_key, geo->keylen);
		retry_key = NULL;
		goto retry;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(btree_get_prev);
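
/*
 * btree_last() and btree_get_prev() together allow a simple walk over all
 * entries in descending key order.  A minimal sketch, with my_tree and
 * visit() as made-up names:
 *
 *	unsigned long key;
 *	void *val;
 *
 *	for (val = btree_last(&my_tree, &btree_geo32, &key);
 *	     val;
 *	     val = btree_get_prev(&my_tree, &btree_geo32, &key))
 *		visit(key, val);
 *
 * Each successful call updates key to the key of the entry just returned,
 * so the next call continues strictly below it.
 */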

static int getpos(struct btree_geo *geo, unsigned long *node,
		unsigned long *key)
{
	int i;

	for (i = 0; i < geo->no_pairs; i++) {
		if (keycmp(geo, node, i, key) <= 0)
			break;
	}
	return i;
}

static int getfill(struct btree_geo *geo, unsigned long *node, int start)
{
	int i;

	for (i = start; i < geo->no_pairs; i++)
		if (!bval(geo, node, i))
			break;
	return i;
}

/*
 * locate the correct leaf node in the btree
 */
static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level)
{
	unsigned long *node = head->node;
	int i, height;

	for (height = head->height; height > level; height--) {
		for (i = 0; i < geo->no_pairs; i++)
			if (keycmp(geo, node, i, key) <= 0)
				break;

		if ((i == geo->no_pairs) || !bval(geo, node, i)) {
			/* right-most key is too large, update it */
			/* FIXME: If the right-most key on higher levels is
			   always zero, this wouldn't be necessary. */
			i--;
			setkey(geo, node, i, key);
		}
		BUG_ON(i < 0);
		node = bval(geo, node, i);
	}
	BUG_ON(!node);
	return node;
}

static int btree_grow(struct btree_head *head, struct btree_geo *geo,
		      gfp_t gfp)
{
	unsigned long *node;
	int fill;

	node = btree_node_alloc(head, gfp);
	if (!node)
		return -ENOMEM;
	if (head->node) {
		fill = getfill(geo, head->node, 0);
		setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
		setval(geo, node, 0, head->node);
	}
	head->node = node;
	head->height++;
	return 0;
}

static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
{
	unsigned long *node;
	int fill;

	if (head->height <= 1)
		return;

	node = head->node;
	fill = getfill(geo, node, 0);
	BUG_ON(fill > 1);
	head->node = bval(geo, node, 0);
	head->height--;
	mempool_free(node, head->mempool);
}

static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
			      unsigned long *key, void *val, int level,
			      gfp_t gfp)
{
	unsigned long *node;
	int i, pos, fill, err;

	BUG_ON(!val);
	if (head->height < level) {
		err = btree_grow(head, geo, gfp);
		if (err)
			return err;
	}

retry:
	node = find_level(head, geo, key, level);
	pos = getpos(geo, node, key);
	fill = getfill(geo, node, pos);
	/* two identical keys are not allowed */
	BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);

	if (fill == geo->no_pairs) {
		/* need to split node */
		unsigned long *new;

		new = btree_node_alloc(head, gfp);
		if (!new)
			return -ENOMEM;
		err = btree_insert_level(head, geo,
				bkey(geo, node, fill / 2 - 1),
				new, level + 1, gfp);
		if (err) {
			mempool_free(new, head->mempool);
			return err;
		}
		for (i = 0; i < fill / 2; i++) {
			setkey(geo, new, i, bkey(geo, node, i));
			setval(geo, new, i, bval(geo, node, i));
			setkey(geo, node, i, bkey(geo, node, i + fill / 2));
			setval(geo, node, i, bval(geo, node, i + fill / 2));
			clearpair(geo, node, i + fill / 2);
		}
		if (fill & 1) {
			setkey(geo, node, i, bkey(geo, node, fill - 1));
			setval(geo, node, i, bval(geo, node, fill - 1));
			clearpair(geo, node, fill - 1);
		}
		goto retry;
	}
	BUG_ON(fill >= geo->no_pairs);

	/* shift and insert */
	for (i = fill; i > pos; i--) {
		setkey(geo, node, i, bkey(geo, node, i - 1));
		setval(geo, node, i, bval(geo, node, i - 1));
	}
	setkey(geo, node, pos, key);
	setval(geo, node, pos, val);

	return 0;
}
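
/*
 * Split example, assuming btree_geo32 with no_pairs == 8: when an insert
 * hits a full leaf holding keys 80 70 60 50 40 30 20 10, a link to the new
 * node is first inserted one level up under key 50 (bkey(geo, node,
 * fill / 2 - 1)), then the upper half 80 70 60 50 moves into the new node
 * while the lower half 40 30 20 10 is shifted to the front of the old one,
 * and the original insert is retried against a leaf that now has free
 * slots.
 */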

int btree_insert(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, void *val, gfp_t gfp)
{
	BUG_ON(!val);
	return btree_insert_level(head, geo, key, val, 1, gfp);
}
EXPORT_SYMBOL_GPL(btree_insert);

static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level);

static void merge(struct btree_head *head, struct btree_geo *geo, int level,
		unsigned long *left, int lfill,
		unsigned long *right, int rfill,
		unsigned long *parent, int lpos)
{
	int i;

	for (i = 0; i < rfill; i++) {
		/* Move all keys to the left */
		setkey(geo, left, lfill + i, bkey(geo, right, i));
		setval(geo, left, lfill + i, bval(geo, right, i));
	}
	/* Exchange left and right child in parent */
	setval(geo, parent, lpos, right);
	setval(geo, parent, lpos + 1, left);
	/* Remove left (formerly right) child from parent */
	btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
	mempool_free(right, head->mempool);
}

static void rebalance(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level, unsigned long *child, int fill)
{
	unsigned long *parent, *left = NULL, *right = NULL;
	int i, no_left, no_right;

	if (fill == 0) {
		/* Because we don't steal entries from a neighbour, this case
		 * can happen.  Parent node contains a single child, this
		 * node, so merging with a sibling never happens.
		 */
		btree_remove_level(head, geo, key, level + 1);
		mempool_free(child, head->mempool);
		return;
	}

	parent = find_level(head, geo, key, level + 1);
	i = getpos(geo, parent, key);
	BUG_ON(bval(geo, parent, i) != child);

	if (i > 0) {
		left = bval(geo, parent, i - 1);
		no_left = getfill(geo, left, 0);
		if (fill + no_left <= geo->no_pairs) {
			merge(head, geo, level,
					left, no_left,
					child, fill,
					parent, i - 1);
			return;
		}
	}
	if (i + 1 < getfill(geo, parent, i)) {
		right = bval(geo, parent, i + 1);
		no_right = getfill(geo, right, 0);
		if (fill + no_right <= geo->no_pairs) {
			merge(head, geo, level,
					child, fill,
					right, no_right,
					parent, i);
			return;
		}
	}
	/*
	 * We could also try to steal one entry from the left or right
	 * neighbor.  By not doing so we changed the invariant from
	 * "all nodes are at least half full" to "no two neighboring
	 * nodes can be merged".  Which means that the average fill of
	 * all nodes is still half or better.
	 */
}
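
/*
 * Merge example with no_pairs == 8: if a removal leaves a node with 3 used
 * slots and its left neighbour holds 5, then 3 + 5 <= 8 and the two nodes
 * are merged - the entries move into the left node and the emptied node is
 * unlinked from the parent and freed.  If the left neighbour held 6 entries
 * instead, 3 + 6 > 8, so no merge happens with it (the right neighbour is
 * tried next); that is the "no two neighboring nodes can be merged"
 * invariant described above.
 */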

static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key, int level)
{
	unsigned long *node;
	int i, pos, fill;
	void *ret;

	if (level > head->height) {
		/* we recursed all the way up */
		head->height = 0;
		head->node = NULL;
		return NULL;
	}

	node = find_level(head, geo, key, level);
	pos = getpos(geo, node, key);
	fill = getfill(geo, node, pos);
	if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
		return NULL;
	ret = bval(geo, node, pos);

	/* remove and shift */
	for (i = pos; i < fill - 1; i++) {
		setkey(geo, node, i, bkey(geo, node, i + 1));
		setval(geo, node, i, bval(geo, node, i + 1));
	}
	clearpair(geo, node, fill - 1);

	if (fill - 1 < geo->no_pairs / 2) {
		if (level < head->height)
			rebalance(head, geo, key, level, node, fill - 1);
		else if (fill - 1 == 1)
			btree_shrink(head, geo);
	}

	return ret;
}

void *btree_remove(struct btree_head *head, struct btree_geo *geo,
		unsigned long *key)
{
	if (head->height == 0)
		return NULL;

	return btree_remove_level(head, geo, key, 1);
}
EXPORT_SYMBOL_GPL(btree_remove);

int btree_merge(struct btree_head *target, struct btree_head *victim,
		struct btree_geo *geo, gfp_t gfp)
{
	unsigned long key[MAX_KEYLEN];
	unsigned long dup[MAX_KEYLEN];
	void *val;
	int err;

	BUG_ON(target == victim);

	if (!(target->node)) {
		/* target is empty, just copy fields over */
		target->node = victim->node;
		target->height = victim->height;
		__btree_init(victim);
		return 0;
	}

	/* TODO: This needs some optimizations.  Currently we do three tree
	 * walks to remove a single object from the victim.
	 */
	for (;;) {
		if (!btree_last(victim, geo, key))
			break;
		val = btree_lookup(victim, geo, key);
		err = btree_insert(target, geo, key, val, gfp);
		if (err)
			return err;
		/* We must make a copy of the key, as the original will get
		 * mangled inside btree_remove. */
		longcpy(dup, key, geo->keylen);
		btree_remove(victim, geo, dup);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(btree_merge);
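
/*
 * Sketch of folding one tree into another, assuming both trees use the same
 * geometry (main_tree and scratch_tree are made-up names):
 *
 *	err = btree_merge(&main_tree, &scratch_tree, &btree_geo32, GFP_KERNEL);
 *
 * On success the victim is left empty but still initialized, so it can be
 * reused or destroyed.  The two trees must not share any key, since
 * inserting a duplicate into the target trips the BUG_ON() in
 * btree_insert_level().
 */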

static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
			       unsigned long *node, unsigned long opaque,
			       void (*func)(void *elem, unsigned long opaque,
					    unsigned long *key, size_t index,
					    void *func2),
			       void *func2, int reap, int height, size_t count)
{
	int i;
	unsigned long *child;

	for (i = 0; i < geo->no_pairs; i++) {
		child = bval(geo, node, i);
		if (!child)
			break;
		if (height > 1)
			count = __btree_for_each(head, geo, child, opaque,
					func, func2, reap, height - 1, count);
		else
			func(child, opaque, bkey(geo, node, i), count++,
					func2);
	}
	if (reap)
		mempool_free(node, head->mempool);
	return count;
}

static void empty(void *elem, unsigned long opaque, unsigned long *key,
		  size_t index, void *func2)
{
}

void visitorl(void *elem, unsigned long opaque, unsigned long *key,
	      size_t index, void *__func)
{
	visitorl_t func = __func;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitorl);

void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
	       size_t index, void *__func)
{
	visitor32_t func = __func;
	u32 *key = (void *)__key;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor32);

void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
	       size_t index, void *__func)
{
	visitor64_t func = __func;
	u64 *key = (void *)__key;

	func(elem, opaque, *key, index);
}
EXPORT_SYMBOL_GPL(visitor64);

void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
		size_t index, void *__func)
{
	visitor128_t func = __func;
	u64 *key = (void *)__key;

	func(elem, opaque, key[0], key[1], index);
}
EXPORT_SYMBOL_GPL(visitor128);

size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
		     unsigned long opaque,
		     void (*func)(void *elem, unsigned long opaque,
				  unsigned long *key,
				  size_t index, void *func2),
		     void *func2)
{
	size_t count = 0;

	if (!func2)
		func = empty;
	if (head->node)
		count = __btree_for_each(head, geo, head->node, opaque, func,
				func2, 0, head->height, 0);
	return count;
}
EXPORT_SYMBOL_GPL(btree_visitor);
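
/*
 * Sketch of visiting every element together with its key and index, using
 * the visitorl() adapter for single-long keys (print_entry and my_tree are
 * made-up names):
 *
 *	static void print_entry(void *elem, unsigned long opaque,
 *				unsigned long key, size_t index)
 *	{
 *		pr_info("entry %zu: key %lu -> %p\n", index, key, elem);
 *	}
 *
 *	count = btree_visitor(&my_tree, &btree_geo32, 0, visitorl, print_entry);
 *
 * btree_visitor() walks the tree read-only.  btree_grim_visitor() below
 * performs the same walk but also frees every node and reinitializes the
 * head, effectively emptying the tree (the elements themselves are not
 * freed; that is up to the callback).
 */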

size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
			  unsigned long opaque,
			  void (*func)(void *elem, unsigned long opaque,
				       unsigned long *key,
				       size_t index, void *func2),
			  void *func2)
{
	size_t count = 0;

	if (!func2)
		func = empty;
	if (head->node)
		count = __btree_for_each(head, geo, head->node, opaque, func,
				func2, 1, head->height, 0);
	__btree_init(head);
	return count;
}
EXPORT_SYMBOL_GPL(btree_grim_visitor);

static int __init btree_module_init(void)
{
	btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
			SLAB_HWCACHE_ALIGN, NULL);
	return 0;
}

static void __exit btree_module_exit(void)
{
	kmem_cache_destroy(btree_cachep);
}

/* If core code starts using btree, initialization should happen even earlier */
module_init(btree_module_init);
module_exit(btree_module_exit);

MODULE_AUTHOR("Joern Engel <[email protected]>");
MODULE_AUTHOR("Johannes Berg <[email protected]>");
MODULE_LICENSE("GPL");