/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_MAPLE_TREE_H
#define _LINUX_MAPLE_TREE_H
/*
 * Maple Tree - An RCU-safe adaptive tree for storing ranges
 * Copyright (c) 2018-2022 Oracle
 * Authors: Liam R. Howlett <[email protected]>
 *          Matthew Wilcox <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
/* #define CONFIG_MAPLE_RCU_DISABLED */
/* #define CONFIG_DEBUG_MAPLE_TREE_VERBOSE */

/*
 * Allocated nodes are mutable until they have been inserted into the tree,
 * at which time they cannot change their type until they have been removed
 * from the tree and an RCU grace period has passed.
 *
 * Removed nodes have their ->parent set to point to themselves. RCU readers
 * check ->parent before relying on the value that they loaded from the
 * slots array. This lets us reuse the slots array for the RCU head.
 *
 * Nodes in the tree point to their parent unless bit 0 is set.
 */
#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
/* 64bit sizes */
#define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */
#define MAPLE_ARANGE64_META_MAX	15	/* Out of range for metadata */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 1)
#else
/* 32bit sizes */
#define MAPLE_NODE_SLOTS	63	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	32	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	21	/* 240 bytes */
#define MAPLE_ARANGE64_META_MAX	31	/* Out of range for metadata */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 2)
#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */

#define MAPLE_NODE_MASK		255UL
/*
 * The node->parent of the root node has bit 0 set and the rest of the pointer
 * is a pointer to the tree itself. No more bits are available in this pointer
 * (on m68k, the data structure may only be 2-byte aligned).
 *
 * Internal non-root nodes can only have maple_range_* nodes as parents. The
 * parent pointer is 256B aligned like all other tree nodes. When storing 32
 * or 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
 * extra bit to store the offset. This extra bit comes from a reuse of the last
 * bit in the node type. This is possible by using bit 1 to indicate if bit 2
 * is part of the type or the slot.
 *
 * Once the type is decided, the decision of an allocation range type or a
 * range type is done by examining the immutable tree flags for the
 * MT_FLAGS_ALLOC_RANGE flag.
 *
 * Node types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and location in the parent pointer:
 *  type  : slot location
 *  0x??1 : Root
 *  0x?00 : 16 bit values, type in 0-1, slot in 2-6
 *  0x010 : 32 bit values, type in 0-2, slot in 3-6
 *  0x110 : 64 bit values, type in 0-2, slot in 3-6
 */

/*
 * This metadata is used to optimize the gap updating code and in reverse
 * searching for gaps or any other code that needs to find the end of the data.
 */
struct maple_metadata {
	unsigned char end;
	unsigned char gap;
};

/*
 * Leaf nodes do not store pointers to nodes, they store user data. Users may
 * store almost any bit pattern. As noted above, the optimisation of storing an
 * entry at 0 in the root pointer cannot be done for data which have the bottom
 * two bits set to '10'. We also reserve values with the bottom two bits set to
 * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs
 * return errnos as a negative errno shifted right by two bits and the bottom
 * two bits set to '10', and while choosing to store these values in the array
 * is not an error, it may lead to confusion if you're testing for an error with
 * mas_is_err().
 *
 * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
 * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
 *
 * In regular B-Tree terms, pivots are called keys. The term pivot is used to
 * indicate that the tree is specifying ranges. Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree. Pivot values are inclusive of the slot with
 * the same index.
 */
struct maple_range_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
	union {
		void __rcu *slot[MAPLE_RANGE64_SLOTS];
		struct {
			void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
			struct maple_metadata meta;
		};
	};
};

/*
 * At tree creation time, the user can specify that they're willing to trade off
 * storing fewer entries in a tree in return for storing more information in
 * each node.
 *
 * The maple tree supports recording the largest range of NULL entries available
 * in this node, also called gaps. This optimises the tree for allocating a
 * range.
 */
struct maple_arange_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
	void __rcu *slot[MAPLE_ARANGE64_SLOTS];
	unsigned long gap[MAPLE_ARANGE64_SLOTS];
	struct maple_metadata meta;
};

struct maple_alloc {
	unsigned long total;
	unsigned char node_count;
	unsigned int request_count;
	struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
};

struct maple_topiary {
	struct maple_pnode *parent;
	struct maple_enode *next;	/* Overlaps the pivot */
};

enum maple_type {
	maple_dense,
	maple_leaf_64,
	maple_range_64,
	maple_arange_64,
};

/**
 * DOC: Maple tree flags
 *
 * * MT_FLAGS_ALLOC_RANGE	- Track gaps in this tree
 * * MT_FLAGS_USE_RCU		- Operate in RCU mode
 * * MT_FLAGS_HEIGHT_OFFSET	- The position of the tree height in the flags
 * * MT_FLAGS_HEIGHT_MASK	- The mask for the maple tree height value
 * * MT_FLAGS_LOCK_MASK		- How the mt_lock is used
 * * MT_FLAGS_LOCK_IRQ		- Acquired irq-safe
 * * MT_FLAGS_LOCK_BH		- Acquired bh-safe
 * * MT_FLAGS_LOCK_EXTERN	- mt_lock is not used
 *
 * MAPLE_HEIGHT_MAX	The largest height that can be stored
 */
#define MT_FLAGS_ALLOC_RANGE	0x01
#define MT_FLAGS_USE_RCU	0x02
#define MT_FLAGS_HEIGHT_OFFSET	0x02
#define MT_FLAGS_HEIGHT_MASK	0x7C
#define MT_FLAGS_LOCK_MASK	0x300
#define MT_FLAGS_LOCK_IRQ	0x100
#define MT_FLAGS_LOCK_BH	0x200
#define MT_FLAGS_LOCK_EXTERN	0x300

#define MAPLE_HEIGHT_MAX	31

#define MAPLE_NODE_TYPE_MASK	0x0F
#define MAPLE_NODE_TYPE_SHIFT	0x03

#define MAPLE_RESERVED_RANGE	4096
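
/*
 * Illustrative sketch only (the in-tree decoding helpers live in
 * lib/maple_tree.c): bits 3-6 of an encoded node pointer hold the
 * enum maple_type of the node it points to, so the masks above recover
 * the type like this:
 *
 *	type = ((unsigned long)enode >> MAPLE_NODE_TYPE_SHIFT) &
 *		MAPLE_NODE_TYPE_MASK;
 */
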
#ifdef CONFIG_LOCKDEP
typedef struct lockdep_map *lockdep_map_p;
#define mt_lock_is_held(mt)	lock_is_held(mt->ma_external_lock)
#define mt_set_external_lock(mt, lock)					\
	(mt)->ma_external_lock = &(lock)->dep_map
#else
typedef struct { /* nothing */ } lockdep_map_p;
#define mt_lock_is_held(mt)	1
#define mt_set_external_lock(mt, lock)	do { } while (0)
#endif

/*
 * If the tree contains a single entry at index 0, it is usually stored in
 * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
 * '01' or '11' is stored in the root, but an entry which ends in '10' will be
 * stored in a node. Bits 3-6 are used to store enum maple_type.
 *
 * The flags are used both to store some immutable information about this tree
 * (set at tree creation time) and dynamic information set under the spinlock.
 *
 * Another use of flags is to indicate global states of the tree. This is the
 * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently
 * in RCU mode. This mode was added to allow the tree to reuse nodes instead of
 * re-allocating and RCU freeing nodes when there is a single user.
 */
struct maple_tree {
	union {
		spinlock_t	ma_lock;
		lockdep_map_p	ma_external_lock;
	};
	void __rcu	*ma_root;
	unsigned int	ma_flags;
};

/**
 * MTREE_INIT() - Initialize a maple tree
 * @name: The maple tree name
 * @__flags: The maple tree flags
 */
#define MTREE_INIT(name, __flags) {					\
	.ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock),		\
	.ma_flags = __flags,						\
	.ma_root = NULL,						\
}

/**
 * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
 * @name: The tree name
 * @__flags: The maple tree flags
 * @__lock: The external lock
 */
#ifdef CONFIG_LOCKDEP
#define MTREE_INIT_EXT(name, __flags, __lock) {				\
	.ma_external_lock = &(__lock).dep_map,				\
	.ma_flags = (__flags),						\
	.ma_root = NULL,						\
}
#else
#define MTREE_INIT_EXT(name, __flags, __lock)	MTREE_INIT(name, __flags)
#endif

#define DEFINE_MTREE(name)						\
	struct maple_tree name = MTREE_INIT(name, 0)
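
/*
 * Example (illustrative sketch; the tree and lock names are hypothetical):
 * defining trees at build time, with either the internal spinlock or an
 * external lock.
 *
 *	static DEFINE_MTREE(my_tree);
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static struct maple_tree my_ext_tree =
 *		MTREE_INIT_EXT(my_ext_tree, MT_FLAGS_LOCK_EXTERN, my_lock);
 */
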
#define mtree_lock(mt)		spin_lock((&(mt)->ma_lock))
#define mtree_lock_nested(mt, subclass) \
		spin_lock_nested((&(mt)->ma_lock), subclass)
#define mtree_unlock(mt)	spin_unlock((&(mt)->ma_lock))

/*
 * The Maple Tree squeezes various bits in at various points which aren't
 * necessarily obvious. Usually, this is done by observing that pointers are
 * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
 * don't use the high bits of pointers to store additional information because
 * we don't know what bits are unused on any given architecture.
 *
 * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
 * low bits for our own purposes. Nodes are currently of 4 types:
 * 1. Single pointer (Range is 0-0)
 * 2. Non-leaf Allocation Range nodes
 * 3. Non-leaf Range nodes
 * 4. Leaf Range nodes
 *
 * All nodes consist of a number of node slots, pivots, and a parent pointer.
 */
struct maple_node {
	union {
		struct {
			struct maple_pnode *parent;
			void __rcu *slot[MAPLE_NODE_SLOTS];
		};
		struct {
			void *pad;
			struct rcu_head rcu;
			struct maple_enode *piv_parent;
			unsigned char parent_slot;
			enum maple_type type;
			unsigned char slot_len;
			unsigned int ma_flags;
		};
		struct maple_range_64 mr64;
		struct maple_arange_64 ma64;
		struct maple_alloc alloc;
	};
};

/*
 * More complicated stores can cause two nodes to become one or three and
 * potentially alter the height of the tree. Either half of the tree may need
 * to be rebalanced against the other. The ma_topiary struct is used to track
 * which nodes have been 'cut' from the tree so that the change can be done
 * safely at a later date. This is done to support RCU.
 */
struct ma_topiary {
	struct maple_enode *head;
	struct maple_enode *tail;
	struct maple_tree *mtree;
};

void *mtree_load(struct maple_tree *mt, unsigned long index);

int mtree_insert(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp);
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);
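
/*
 * Example (illustrative sketch; 'tree' and 'entry' are hypothetical):
 * allocating a free range of 16 indices somewhere in [0, 1023] of a tree
 * initialised with MT_FLAGS_ALLOC_RANGE. On success, @entry is stored over
 * the allocated range and 'start' holds its first index.
 *
 *	unsigned long start;
 *
 *	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 *	if (!mtree_alloc_range(&tree, &start, entry, 16, 0, 1023, GFP_KERNEL))
 *		pr_debug("allocated [%lu, %lu]\n", start, start + 15);
 */
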
int mtree_store_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp);
int mtree_store(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);

int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);

void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);

/**
 * mtree_empty() - Determine if a tree has any present entries.
 * @mt: Maple Tree.
 *
 * Context: Any context.
 * Return: %true if the tree contains only NULL pointers.
 */
static inline bool mtree_empty(const struct maple_tree *mt)
{
	return mt->ma_root == NULL;
}
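
/*
 * Example (illustrative sketch; 'tree' and 'p' are hypothetical): basic use
 * of the simple API above, which takes the tree lock internally.
 *
 *	if (!mtree_insert_range(&tree, 5, 10, p, GFP_KERNEL)) {
 *		WARN_ON(mtree_load(&tree, 7) != p);
 *		mtree_erase(&tree, 5);
 *	}
 */
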
/* Advanced API */

/*
 * The maple state is defined in the struct ma_state and is used to keep track
 * of information during operations, and even between operations when using the
 * advanced API.
 *
 * If state->node has bit 0 set then it references a tree location which is not
 * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
 * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
 * node type.
 *
 * state->alloc either has a requested number of nodes or an allocated node. If
 * state->alloc has a requested number of nodes, the first bit will be set (0x1)
 * and the remaining bits are the value. If state->alloc is a node, then the
 * node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for
 * storing more allocated nodes, a total number of nodes allocated, and the
 * node_count in this node. node_count is the number of allocated nodes in this
 * node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
 * nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc
 * by removing a node from the state->alloc node until state->alloc->node_count
 * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
 * to state->alloc. Nodes are pushed onto state->alloc by putting the current
 * state->alloc into the pushed node's slot[0].
 *
 * The state also contains the implied min/max of the state->node, the depth of
 * this search, and the offset. The implied min/max are either from the parent
 * node or are 0-oo for the root node. The depth is incremented or decremented
 * every time a node is walked down or up. The offset is the slot/pivot of
 * interest in the node - either for reading or writing.
 *
 * When returning a value the maple state index and last respectively contain
 * the start and end of the range for the entry. Ranges are inclusive in the
 * Maple Tree.
 */
struct ma_state {
	struct maple_tree *tree;	/* The tree we're operating in */
	unsigned long index;		/* The index we're operating on - range start */
	unsigned long last;		/* The last index we're operating on - range end */
	struct maple_enode *node;	/* The node containing this entry */
	unsigned long min;		/* The minimum index of this node - implied pivot min */
	unsigned long max;		/* The maximum index of this node - implied pivot max */
	struct maple_alloc *alloc;	/* Allocated nodes for this operation */
	unsigned char depth;		/* depth of tree descent during write */
	unsigned char offset;
	unsigned char mas_flags;
};

struct ma_wr_state {
	struct ma_state *mas;
	struct maple_node *node;	/* Decoded mas->node */
	unsigned long r_min;		/* range min */
	unsigned long r_max;		/* range max */
	enum maple_type type;		/* mas->node type */
	unsigned char offset_end;	/* The offset where the write ends */
	unsigned char node_end;		/* mas->node end */
	unsigned long *pivots;		/* mas->node->pivots pointer */
	unsigned long end_piv;		/* The pivot at the offset end */
	void __rcu **slots;		/* mas->node->slots pointer */
	void *entry;			/* The entry to write */
	void *content;			/* The existing entry that is being overwritten */
};

#define mas_lock(mas)		spin_lock(&((mas)->tree->ma_lock))
#define mas_lock_nested(mas, subclass) \
		spin_lock_nested(&((mas)->tree->ma_lock), subclass)
#define mas_unlock(mas)		spin_unlock(&((mas)->tree->ma_lock))

/*
 * Special values for ma_state.node.
 * MAS_START means we have not searched the tree.
 * MAS_ROOT means we have searched the tree and the entry we found lives in
 * the root of the tree (ie it has index 0, length 1 and is the only entry in
 * the tree).
 * MAS_NONE means we have searched the tree and there is no node in the
 * tree for this entry. For example, we searched for index 1 in an empty
 * tree. Or we have a tree which points to a full leaf node and we
 * searched for an entry which is larger than can be contained in that
 * leaf node.
 * MAS_PAUSE means the walk was paused (eg by mas_pause() so that the lock
 * could be dropped) and will continue from the last range on the next
 * operation.
 * MA_ERROR represents an errno. After dropping the lock and attempting
 * to resolve the error, the walk would have to be restarted from the
 * top of the tree as the tree may have been modified.
 */
#define MAS_START	((struct maple_enode *)1UL)
#define MAS_ROOT	((struct maple_enode *)5UL)
#define MAS_NONE	((struct maple_enode *)9UL)
#define MAS_PAUSE	((struct maple_enode *)17UL)
#define MA_ERROR(err) \
		((struct maple_enode *)(((unsigned long)err << 2) | 2UL))

#define MA_STATE(name, mt, first, end)					\
	struct ma_state name = {					\
		.tree = mt,						\
		.index = first,						\
		.last = end,						\
		.node = MAS_START,					\
		.min = 0,						\
		.max = ULONG_MAX,					\
		.alloc = NULL,						\
	}
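
/*
 * Example (illustrative sketch; 'tree' is hypothetical): an on-stack state
 * covering the range 12-16 of a tree. Most advanced API calls operate on a
 * struct ma_state set up this way.
 *
 *	MA_STATE(mas, &tree, 12, 16);
 */
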
#define MA_WR_STATE(name, ma_state, wr_entry)				\
	struct ma_wr_state name = {					\
		.mas = ma_state,					\
		.content = NULL,					\
		.entry = wr_entry,					\
	}

#define MA_TOPIARY(name, tree)						\
	struct ma_topiary name = {					\
		.head = NULL,						\
		.tail = NULL,						\
		.mtree = tree,						\
	}

void *mas_walk(struct ma_state *mas);
void *mas_store(struct ma_state *mas, void *entry);
void *mas_erase(struct ma_state *mas);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long min);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
bool mas_is_err(struct ma_state *mas);

bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
void maple_tree_init(void);
void mas_destroy(struct ma_state *mas);
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);

void *mas_prev(struct ma_state *mas, unsigned long min);
void *mas_prev_range(struct ma_state *mas, unsigned long min);
void *mas_next(struct ma_state *mas, unsigned long max);
void *mas_next_range(struct ma_state *mas, unsigned long max);

int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
		   unsigned long size);
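
/*
 * Example (illustrative sketch; 'tree', 'index' and 'entry' are
 * hypothetical): storing with the advanced API. The caller holds the tree
 * lock; mas_store_gfp() retries the allocation internally, dropping and
 * re-taking the lock if it must sleep.
 *
 *	MA_STATE(mas, &tree, index, index);
 *	int ret;
 *
 *	mas_lock(&mas);
 *	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
 *	mas_unlock(&mas);
 */
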
/* Checks if a mas has not found anything */
static inline bool mas_is_none(struct ma_state *mas)
{
	return mas->node == MAS_NONE;
}

/* Checks if a mas has been paused */
static inline bool mas_is_paused(struct ma_state *mas)
{
	return mas->node == MAS_PAUSE;
}

void mas_dup_tree(struct ma_state *oldmas, struct ma_state *mas);
void mas_dup_store(struct ma_state *mas, void *entry);

/*
 * This finds an empty area from the highest address to the lowest, ie the
 * "topdown" version of mas_empty_area().
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		       unsigned long max, unsigned long size);
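
/*
 * Example (illustrative sketch; 'tree' is hypothetical): finding the highest
 * gap of 64 free indices in a tree created with MT_FLAGS_ALLOC_RANGE. On
 * success, mas.index and mas.last describe the range that was found.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area_rev(&mas, 0, ULONG_MAX, 64))
 *		pr_debug("gap at [%lu, %lu]\n", mas.index, mas.last);
 *	mas_unlock(&mas);
 */
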
/**
 * mas_reset() - Reset a Maple Tree operation state.
 * @mas: Maple Tree operation state.
 *
 * Resets the error or walk state of the @mas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * lock and want to reuse the ma_state.
 *
 * Context: Any context.
 */
static inline void mas_reset(struct ma_state *mas)
{
	mas->node = MAS_START;
}
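
/*
 * Example (illustrative sketch): re-using a state after dropping the lock.
 * The tree may have changed while unlocked, so restart from the top.
 *
 *	mas_unlock(&mas);
 *	...
 *	mas_lock(&mas);
 *	mas_reset(&mas);
 *	entry = mas_walk(&mas);
 */
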
/**
 * mas_for_each() - Iterate over a range of the maple tree.
 * @__mas: Maple Tree operation state (maple_state)
 * @__entry: Entry retrieved from the tree
 * @__max: maximum index to retrieve from the tree
 *
 * When returned, mas->index and mas->last will hold the entire range for the
 * entry.
 *
 * Note: may return the zero entry.
 */
#define mas_for_each(__mas, __entry, __max) \
	while (((__entry) = mas_find((__mas), (__max))) != NULL)
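
/*
 * Example (illustrative sketch; 'tree' is hypothetical): iterating under
 * rcu_read_lock() and pausing to reschedule. mas_pause() records the
 * position so the iteration can continue after the lock is dropped.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */
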
/**
 * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
 * current location.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Sets the internal maple state values to a sub-range.
 * Please use mas_set_range() if you do not know where you are in the tree.
 */
static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
		unsigned long last)
{
	mas->index = start;
	mas->last = last;
}

/**
 * mas_set_range() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Move the operation state to refer to a different range. This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
	__mas_set_range(mas, start, last);
	mas->node = MAS_START;
}

/**
 * mas_set() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @index: New index into the Maple Tree.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline void mas_set(struct ma_state *mas, unsigned long index)
{
	mas_set_range(mas, index, index);
}
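
/*
 * Example (illustrative sketch): moving an existing state to index 100 and,
 * under the lock, reading the entry there.
 *
 *	mas_set(&mas, 100);
 *	entry = mas_walk(&mas);
 */
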
static inline bool mt_external_lock(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
}

/**
 * mt_init_flags() - Initialise an empty maple tree with flags.
 * @mt: Maple Tree
 * @flags: maple tree flags.
 *
 * If you need to initialise a Maple Tree with special flags (eg, an
 * allocation tree), use this function.
 *
 * Context: Any context.
 */
static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
{
	mt->ma_flags = flags;
	if (!mt_external_lock(mt))
		spin_lock_init(&mt->ma_lock);
	rcu_assign_pointer(mt->ma_root, NULL);
}

/**
 * mt_init() - Initialise an empty maple tree.
 * @mt: Maple Tree
 *
 * An empty Maple Tree.
 *
 * Context: Any context.
 */
static inline void mt_init(struct maple_tree *mt)
{
	mt_init_flags(mt, 0);
}

static inline bool mt_in_rcu(struct maple_tree *mt)
{
#ifdef CONFIG_MAPLE_RCU_DISABLED
	return false;
#endif
	return mt->ma_flags & MT_FLAGS_USE_RCU;
}

/**
 * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
 * @mt: The Maple Tree
 */
static inline void mt_clear_in_rcu(struct maple_tree *mt)
{
	if (!mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		BUG_ON(!mt_lock_is_held(mt));
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

/**
 * mt_set_in_rcu() - Switch the tree to RCU safe mode.
 * @mt: The Maple Tree
 */
static inline void mt_set_in_rcu(struct maple_tree *mt)
{
	if (mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		BUG_ON(!mt_lock_is_held(mt));
		mt->ma_flags |= MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags |= MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

static inline unsigned int mt_height(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
}

void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		    unsigned long max);
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);

/**
 * mt_for_each - Iterate over each entry starting at index until max.
 * @__tree: The Maple Tree
 * @__entry: The current entry
 * @__index: The index to update to track the location in the tree
 * @__max: The maximum limit for @index
 *
 * Note: Will not return the zero entry.
 */
#define mt_for_each(__tree, __entry, __index, __max) \
	for (__entry = mt_find(__tree, &(__index), __max); \
		__entry; __entry = mt_find_after(__tree, &(__index), __max))
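
/*
 * Example (illustrative sketch; 'tree' is hypothetical): walking every
 * entry with the simple API. mt_find() and mt_find_after() take the RCU
 * read lock themselves, and @index is advanced past each returned range.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	mt_for_each(&tree, entry, index, ULONG_MAX) {
 *		pr_debug("entry %p\n", entry);
 *	}
 */
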
#ifdef CONFIG_DEBUG_MAPLE_TREE
extern atomic_t maple_tree_tests_run;
extern atomic_t maple_tree_tests_passed;

void mt_dump(const struct maple_tree *mt);
void mt_validate(struct maple_tree *mt);
void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mt_dump(__tree);					\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)
#else
#define MT_BUG_ON(__tree, __x)	BUG_ON(__x)
#endif /* CONFIG_DEBUG_MAPLE_TREE */

#endif /* _LINUX_MAPLE_TREE_H */