// SPDX-License-Identifier: GPL-2.0-only
/*
 * Sparse bit array
 *
 * Copyright (C) 2018, Google LLC.
 * Copyright (C) 2018, Red Hat, Inc. (code style cleanup and fuzzing driver)
 *
 * This library provides functions to support a memory efficient bit array,
 * with an index size of 2^64.  A sparsebit array is allocated through
 * the use of sparsebit_alloc() and freed via sparsebit_free(),
 * such as in the following:
 *
 *   struct sparsebit *s;
 *   s = sparsebit_alloc();
 *   sparsebit_free(&s);
 *
 * Note that sparsebit_free() takes a pointer to the sparsebit pointer.
 * This is so that sparsebit_free() is able to poison the pointer
 * (e.g. set it to NULL) to the struct sparsebit before returning
 * to the caller.
 *
 * Between the return of sparsebit_alloc() and the call of
 * sparsebit_free(), there are multiple query and modifying operations
 * that can be performed on the allocated sparsebit array.  All of
 * these operations take as a parameter the value returned from
 * sparsebit_alloc() and most also take a bit index.  Frequently
 * used routines include:
 *
 * ---- Query Operations
 * sparsebit_is_set(s, idx)
 * sparsebit_is_clear(s, idx)
 * sparsebit_any_set(s)
 * sparsebit_first_set(s)
 * sparsebit_next_set(s, prev_idx)
 *
 * ---- Modifying Operations
 * sparsebit_set(s, idx)
 * sparsebit_clear(s, idx)
 * sparsebit_set_num(s, idx, num);
 * sparsebit_clear_num(s, idx, num);
 *
 * A common operation is to iterate over all the bits set in a test
 * sparsebit array.  This can be done via code with the following structure:
 *
 *   sparsebit_idx_t idx;
 *   if (sparsebit_any_set(s)) {
 *           idx = sparsebit_first_set(s);
 *           do {
 *                   ...
 *                   idx = sparsebit_next_set(s, idx);
 *           } while (idx != 0);
 *   }
 *
 * The index of the first set bit needs to be obtained via
 * sparsebit_first_set(), because sparsebit_next_set() needs
 * the index of the previously set bit.  The sparsebit_idx_t type is
 * unsigned, so there is no previous index before 0 that is available.
 * Also, the call to sparsebit_first_set() is not made unless there
 * is at least 1 bit in the array set.  This is because
 * sparsebit_first_set() aborts when called with no bits set.
 * It is the caller's responsibility to ensure that the sparsebit
 * array has at least a single bit set before calling
 * sparsebit_first_set().
 *
 * ==== Implementation Overview ====
 * For the most part the internal implementation of sparsebit is
 * opaque to the caller.  One important implementation detail that the
 * caller may need to be aware of is the spatial complexity of the
 * implementation.  This implementation of a sparsebit array is not
 * only sparse, in that it uses memory proportional to the number of bits
 * set; it is also efficient in memory usage when most of the bits are
 * set.
 *
 * At a high level the state of the bit settings is maintained through
 * the use of a binary search tree, where each node contains at least
 * the following members:
 *
 *   typedef uint64_t sparsebit_idx_t;
 *   typedef uint64_t sparsebit_num_t;
 *
 *   sparsebit_idx_t idx;
 *   uint32_t mask;
 *   sparsebit_num_t num_after;
 *
 * The idx member contains the bit index of the first bit described by this
 * node, while the mask member stores the setting of the first 32 bits.
 * The setting of the bit at idx + n, where 0 <= n < 32, is located in the
 * mask member at 1 << n.
 *
 * Nodes are sorted by idx and the bits described by two nodes will never
 * overlap.  The idx member is always aligned to the mask size, i.e. a
 * multiple of 32.
 *
 * Beyond a typical implementation, the nodes in this implementation also
 * contain a member named num_after.  The num_after member holds the
 * number of bits immediately after the mask bits that are contiguously set.
 * The use of the num_after member allows this implementation to efficiently
 * represent cases where most bits are set.  For example, the case of all
 * but the last two bits set is represented by the following two nodes:
 *
 *   node 0 - idx: 0x0 mask: 0xffffffff num_after: 0xffffffffffffffc0
 *   node 1 - idx: 0xffffffffffffffe0 mask: 0x3fffffff num_after: 0
 *
 * ==== Invariants ====
 * This implementation maintains the following invariants:
 *
 *   + Nodes are only used to represent bits that are set.
 *     Nodes with a mask of 0 and num_after of 0 are not allowed.
 *
 *   + The sum of bits set in all the nodes is equal to the value of
 *     the struct sparsebit num_set member.
 *
 *   + The setting of at least one bit is always described in a node's
 *     mask (mask >= 1).
 *
 *   + A node with all mask bits set only occurs when the last bit
 *     described by the previous node is not equal to this node's
 *     starting index - 1.  All such occurrences of this condition are
 *     avoided by moving the setting of the node's mask bits into
 *     the previous node's num_after setting.
 *
 *   + A node's starting index is evenly divisible by the number of bits
 *     within a node's mask member.
 *
 *   + Nodes never represent a range of bits that wrap around the
 *     highest supported index.
 *
 *       (idx + MASK_BITS + num_after - 1) <= ((sparsebit_idx_t) 0) - 1
 *
 *     As a consequence of the above, the num_after member of a node
 *     will always be <=:
 *
 *       maximum_index - nodes_starting_index - number_of_mask_bits
 *
 *   + Nodes within the binary search tree are sorted based on each
 *     node's starting index.
 *
 *   + The ranges of bits described by any two nodes do not overlap.  The
 *     range of bits described by a single node is:
 *
 *       start: node->idx
 *       end (inclusive): node->idx + MASK_BITS + node->num_after - 1;
 *
 * Note, at times these invariants are temporarily violated for a
 * specific portion of the code.  For example, when setting a mask
 * bit, there is a small delay between when the mask bit is set and the
 * value in the struct sparsebit num_set member is updated.  Other
 * temporary violations occur when node_split() is called with a specified
 * index and ensures that a node whose mask represents the bit at the
 * specified index exists.  At times, to do this, node_split()
 * must split an existing node into two nodes or create a node that
 * has no bits set.  Such temporary violations must be corrected before
 * returning to the caller.  These corrections are typically performed
 * by the local function node_reduce().
 */
#include "test_util.h"
#include "sparsebit.h"

#include <limits.h>
#include <assert.h>

#define DUMP_LINE_MAX 100 /* Does not include indent amount */

typedef uint32_t mask_t;
#define MASK_BITS (sizeof(mask_t) * CHAR_BIT)
  164. struct node {
  165. struct node *parent;
  166. struct node *left;
  167. struct node *right;
  168. sparsebit_idx_t idx; /* index of least-significant bit in mask */
  169. sparsebit_num_t num_after; /* num contiguously set after mask */
  170. mask_t mask;
  171. };
  172. struct sparsebit {
  173. /*
  174. * Points to root node of the binary search
  175. * tree. Equal to NULL when no bits are set in
  176. * the entire sparsebit array.
  177. */
  178. struct node *root;
  179. /*
  180. * A redundant count of the total number of bits set. Used for
  181. * diagnostic purposes and to change the time complexity of
  182. * sparsebit_num_set() from O(n) to O(1).
  183. * Note: Due to overflow, a value of 0 means none or all set.
  184. */
  185. sparsebit_num_t num_set;
  186. };
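/*
 * Illustrative usage sketch (not part of the library): a minimal,
 * build-disabled example of the allocate/set/iterate/free pattern
 * described in the file header.  The SPARSEBIT_EXAMPLES guard is
 * hypothetical and is never defined by any build; the example relies
 * only on the public API declared in sparsebit.h.
 */
#ifdef SPARSEBIT_EXAMPLES
static void sparsebit_usage_example(void)
{
	struct sparsebit *s = sparsebit_alloc();
	sparsebit_idx_t idx;

	/* Mark a small range as set, then punch a hole in it. */
	sparsebit_set_num(s, 0x1000, 16);
	sparsebit_clear(s, 0x1008);

	/* Iterate over every set bit, as shown in the header comment. */
	if (sparsebit_any_set(s)) {
		idx = sparsebit_first_set(s);
		do {
			/* ... use idx ... */
			idx = sparsebit_next_set(s, idx);
		} while (idx != 0);
	}

	sparsebit_free(&s);
}
#endif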
  187. /* Returns the number of set bits described by the settings
  188. * of the node pointed to by nodep.
  189. */
  190. static sparsebit_num_t node_num_set(struct node *nodep)
  191. {
  192. return nodep->num_after + __builtin_popcount(nodep->mask);
  193. }
  194. /* Returns a pointer to the node that describes the
  195. * lowest bit index.
  196. */
  197. static struct node *node_first(struct sparsebit *s)
  198. {
  199. struct node *nodep;
  200. for (nodep = s->root; nodep && nodep->left; nodep = nodep->left)
  201. ;
  202. return nodep;
  203. }
  204. /* Returns a pointer to the node that describes the
  205. * lowest bit index > the index of the node pointed to by np.
  206. * Returns NULL if no node with a higher index exists.
  207. */
  208. static struct node *node_next(struct sparsebit *s, struct node *np)
  209. {
  210. struct node *nodep = np;
  211. /*
  212. * If current node has a right child, next node is the left-most
  213. * of the right child.
  214. */
  215. if (nodep->right) {
  216. for (nodep = nodep->right; nodep->left; nodep = nodep->left)
  217. ;
  218. return nodep;
  219. }
  220. /*
  221. * No right child. Go up until node is left child of a parent.
  222. * That parent is then the next node.
  223. */
  224. while (nodep->parent && nodep == nodep->parent->right)
  225. nodep = nodep->parent;
  226. return nodep->parent;
  227. }
  228. /* Searches for and returns a pointer to the node that describes the
  229. * highest index < the index of the node pointed to by np.
  230. * Returns NULL if no node with a lower index exists.
  231. */
  232. static struct node *node_prev(struct sparsebit *s, struct node *np)
  233. {
  234. struct node *nodep = np;
  235. /*
 * If current node has a left child, the previous node is the
 * right-most node of the left child.
  238. */
  239. if (nodep->left) {
  240. for (nodep = nodep->left; nodep->right; nodep = nodep->right)
  241. ;
  242. return (struct node *) nodep;
  243. }
  244. /*
 * No left child.  Go up until node is right child of a parent.
 * That parent is then the previous node.
  247. */
  248. while (nodep->parent && nodep == nodep->parent->left)
  249. nodep = nodep->parent;
  250. return (struct node *) nodep->parent;
  251. }
  252. /* Allocates space to hold a copy of the node sub-tree pointed to by
  253. * subtree and duplicates the bit settings to the newly allocated nodes.
  254. * Returns the newly allocated copy of subtree.
  255. */
  256. static struct node *node_copy_subtree(struct node *subtree)
  257. {
  258. struct node *root;
  259. /* Duplicate the node at the root of the subtree */
  260. root = calloc(1, sizeof(*root));
  261. if (!root) {
  262. perror("calloc");
  263. abort();
  264. }
  265. root->idx = subtree->idx;
  266. root->mask = subtree->mask;
  267. root->num_after = subtree->num_after;
  268. /* As needed, recursively duplicate the left and right subtrees */
  269. if (subtree->left) {
  270. root->left = node_copy_subtree(subtree->left);
  271. root->left->parent = root;
  272. }
  273. if (subtree->right) {
  274. root->right = node_copy_subtree(subtree->right);
  275. root->right->parent = root;
  276. }
  277. return root;
  278. }
  279. /* Searches for and returns a pointer to the node that describes the setting
  280. * of the bit given by idx. A node describes the setting of a bit if its
  281. * index is within the bits described by the mask bits or the number of
  282. * contiguous bits set after the mask. Returns NULL if there is no such node.
  283. */
  284. static struct node *node_find(struct sparsebit *s, sparsebit_idx_t idx)
  285. {
  286. struct node *nodep;
  287. /* Find the node that describes the setting of the bit at idx */
  288. for (nodep = s->root; nodep;
  289. nodep = nodep->idx > idx ? nodep->left : nodep->right) {
  290. if (idx >= nodep->idx &&
  291. idx <= nodep->idx + MASK_BITS + nodep->num_after - 1)
  292. break;
  293. }
  294. return nodep;
  295. }
  296. /* Entry Requirements:
  297. * + A node that describes the setting of idx is not already present.
  298. *
  299. * Adds a new node to describe the setting of the bit at the index given
  300. * by idx. Returns a pointer to the newly added node.
  301. *
 * TODO(lhuemill): Degenerate cases cause the tree to get unbalanced.
  303. */
  304. static struct node *node_add(struct sparsebit *s, sparsebit_idx_t idx)
  305. {
  306. struct node *nodep, *parentp, *prev;
  307. /* Allocate and initialize the new node. */
  308. nodep = calloc(1, sizeof(*nodep));
  309. if (!nodep) {
  310. perror("calloc");
  311. abort();
  312. }
  313. nodep->idx = idx & -MASK_BITS;
  314. /* If no nodes, set it up as the root node. */
  315. if (!s->root) {
  316. s->root = nodep;
  317. return nodep;
  318. }
  319. /*
  320. * Find the parent where the new node should be attached
  321. * and add the node there.
  322. */
  323. parentp = s->root;
  324. while (true) {
  325. if (idx < parentp->idx) {
  326. if (!parentp->left) {
  327. parentp->left = nodep;
  328. nodep->parent = parentp;
  329. break;
  330. }
  331. parentp = parentp->left;
  332. } else {
  333. assert(idx > parentp->idx + MASK_BITS + parentp->num_after - 1);
  334. if (!parentp->right) {
  335. parentp->right = nodep;
  336. nodep->parent = parentp;
  337. break;
  338. }
  339. parentp = parentp->right;
  340. }
  341. }
  342. /*
  343. * Does num_after bits of previous node overlap with the mask
  344. * of the new node? If so set the bits in the new nodes mask
  345. * and reduce the previous nodes num_after.
  346. */
  347. prev = node_prev(s, nodep);
  348. while (prev && prev->idx + MASK_BITS + prev->num_after - 1 >= nodep->idx) {
  349. unsigned int n1 = (prev->idx + MASK_BITS + prev->num_after - 1)
  350. - nodep->idx;
  351. assert(prev->num_after > 0);
  352. assert(n1 < MASK_BITS);
  353. assert(!(nodep->mask & (1 << n1)));
  354. nodep->mask |= (1 << n1);
  355. prev->num_after--;
  356. }
  357. return nodep;
  358. }
  359. /* Returns whether all the bits in the sparsebit array are set. */
  360. bool sparsebit_all_set(struct sparsebit *s)
  361. {
  362. /*
 * If any nodes exist, there must be at least one bit set.  The only
 * case where a bit is set and the total num set is 0 is when all
 * bits are set.
  366. */
  367. return s->root && s->num_set == 0;
  368. }
  369. /* Clears all bits described by the node pointed to by nodep, then
  370. * removes the node.
  371. */
  372. static void node_rm(struct sparsebit *s, struct node *nodep)
  373. {
  374. struct node *tmp;
  375. sparsebit_num_t num_set;
  376. num_set = node_num_set(nodep);
  377. assert(s->num_set >= num_set || sparsebit_all_set(s));
  378. s->num_set -= node_num_set(nodep);
  379. /* Have both left and right child */
  380. if (nodep->left && nodep->right) {
  381. /*
  382. * Move left children to the leftmost leaf node
  383. * of the right child.
  384. */
  385. for (tmp = nodep->right; tmp->left; tmp = tmp->left)
  386. ;
  387. tmp->left = nodep->left;
  388. nodep->left = NULL;
  389. tmp->left->parent = tmp;
  390. }
  391. /* Left only child */
  392. if (nodep->left) {
  393. if (!nodep->parent) {
  394. s->root = nodep->left;
  395. nodep->left->parent = NULL;
  396. } else {
  397. nodep->left->parent = nodep->parent;
  398. if (nodep == nodep->parent->left)
  399. nodep->parent->left = nodep->left;
  400. else {
  401. assert(nodep == nodep->parent->right);
  402. nodep->parent->right = nodep->left;
  403. }
  404. }
  405. nodep->parent = nodep->left = nodep->right = NULL;
  406. free(nodep);
  407. return;
  408. }
  409. /* Right only child */
  410. if (nodep->right) {
  411. if (!nodep->parent) {
  412. s->root = nodep->right;
  413. nodep->right->parent = NULL;
  414. } else {
  415. nodep->right->parent = nodep->parent;
  416. if (nodep == nodep->parent->left)
  417. nodep->parent->left = nodep->right;
  418. else {
  419. assert(nodep == nodep->parent->right);
  420. nodep->parent->right = nodep->right;
  421. }
  422. }
  423. nodep->parent = nodep->left = nodep->right = NULL;
  424. free(nodep);
  425. return;
  426. }
  427. /* Leaf Node */
  428. if (!nodep->parent) {
  429. s->root = NULL;
  430. } else {
  431. if (nodep->parent->left == nodep)
  432. nodep->parent->left = NULL;
  433. else {
  434. assert(nodep == nodep->parent->right);
  435. nodep->parent->right = NULL;
  436. }
  437. }
  438. nodep->parent = nodep->left = nodep->right = NULL;
  439. free(nodep);
  440. return;
  441. }
  442. /* Splits the node containing the bit at idx so that there is a node
  443. * that starts at the specified index. If no such node exists, a new
  444. * node at the specified index is created. Returns the new node.
  445. *
 * idx must be aligned to a mask boundary (a multiple of MASK_BITS).
  447. */
  448. static struct node *node_split(struct sparsebit *s, sparsebit_idx_t idx)
  449. {
  450. struct node *nodep1, *nodep2;
  451. sparsebit_idx_t offset;
  452. sparsebit_num_t orig_num_after;
  453. assert(!(idx % MASK_BITS));
  454. /*
  455. * Is there a node that describes the setting of idx?
  456. * If not, add it.
  457. */
  458. nodep1 = node_find(s, idx);
  459. if (!nodep1)
  460. return node_add(s, idx);
  461. /*
  462. * All done if the starting index of the node is where the
  463. * split should occur.
  464. */
  465. if (nodep1->idx == idx)
  466. return nodep1;
  467. /*
  468. * Split point not at start of mask, so it must be part of
  469. * bits described by num_after.
  470. */
  471. /*
  472. * Calculate offset within num_after for where the split is
  473. * to occur.
  474. */
  475. offset = idx - (nodep1->idx + MASK_BITS);
  476. orig_num_after = nodep1->num_after;
  477. /*
  478. * Add a new node to describe the bits starting at
  479. * the split point.
  480. */
  481. nodep1->num_after = offset;
  482. nodep2 = node_add(s, idx);
  483. /* Move bits after the split point into the new node */
  484. nodep2->num_after = orig_num_after - offset;
  485. if (nodep2->num_after >= MASK_BITS) {
  486. nodep2->mask = ~(mask_t) 0;
  487. nodep2->num_after -= MASK_BITS;
  488. } else {
  489. nodep2->mask = (1 << nodep2->num_after) - 1;
  490. nodep2->num_after = 0;
  491. }
  492. return nodep2;
  493. }
  494. /* Iteratively reduces the node pointed to by nodep and its adjacent
  495. * nodes into a more compact form. For example, a node with a mask with
  496. * all bits set adjacent to a previous node, will get combined into a
  497. * single node with an increased num_after setting.
  498. *
  499. * After each reduction, a further check is made to see if additional
  500. * reductions are possible with the new previous and next nodes. Note,
  501. * a search for a reduction is only done across the nodes nearest nodep
  502. * and those that became part of a reduction. Reductions beyond nodep
  503. * and the adjacent nodes that are reduced are not discovered. It is the
  504. * responsibility of the caller to pass a nodep that is within one node
  505. * of each possible reduction.
  506. *
 * This function does not fix all temporary violations of the invariants.
  508. * For example it does not fix the case where the bit settings described
  509. * by two or more nodes overlap. Such a violation introduces the potential
  510. * complication of a bit setting for a specific index having different settings
  511. * in different nodes. This would then introduce the further complication
  512. * of which node has the correct setting of the bit and thus such conditions
  513. * are not allowed.
  514. *
  515. * This function is designed to fix invariant violations that are introduced
 * by node_split() and by changes to the node's mask or num_after members.
 * For example, when setting a bit within a node's mask, the function that
  518. * sets the bit doesn't have to worry about whether the setting of that
  519. * bit caused the mask to have leading only or trailing only bits set.
  520. * Instead, the function can call node_reduce(), with nodep equal to the
  521. * node address that it set a mask bit in, and node_reduce() will notice
  522. * the cases of leading or trailing only bits and that there is an
  523. * adjacent node that the bit settings could be merged into.
  524. *
  525. * This implementation specifically detects and corrects violation of the
  526. * following invariants:
  527. *
 *   + Nodes are only used to represent bits that are set.
 *     Nodes with a mask of 0 and num_after of 0 are not allowed.
 *
 *   + The setting of at least one bit is always described in a node's
 *     mask (mask >= 1).
 *
 *   + A node with all mask bits set only occurs when the last bit
 *     described by the previous node is not equal to this node's
 *     starting index - 1.  All such occurrences of this condition are
 *     avoided by moving the setting of the node's mask bits into
 *     the previous node's num_after setting.
  539. */
  540. static void node_reduce(struct sparsebit *s, struct node *nodep)
  541. {
  542. bool reduction_performed;
  543. do {
  544. reduction_performed = false;
  545. struct node *prev, *next, *tmp;
  546. /* 1) Potential reductions within the current node. */
  547. /* Nodes with all bits cleared may be removed. */
  548. if (nodep->mask == 0 && nodep->num_after == 0) {
  549. /*
  550. * About to remove the node pointed to by
  551. * nodep, which normally would cause a problem
  552. * for the next pass through the reduction loop,
  553. * because the node at the starting point no longer
  554. * exists. This potential problem is handled
  555. * by first remembering the location of the next
  556. * or previous nodes. Doesn't matter which, because
  557. * once the node at nodep is removed, there will be
  558. * no other nodes between prev and next.
  559. *
  560. * Note, the checks performed on nodep against both
  561. * both prev and next both check for an adjacent
  562. * node that can be reduced into a single node. As
  563. * such, after removing the node at nodep, doesn't
  564. * matter whether the nodep for the next pass
  565. * through the loop is equal to the previous pass
  566. * prev or next node. Either way, on the next pass
  567. * the one not selected will become either the
  568. * prev or next node.
  569. */
  570. tmp = node_next(s, nodep);
  571. if (!tmp)
  572. tmp = node_prev(s, nodep);
  573. node_rm(s, nodep);
  574. nodep = NULL;
  575. nodep = tmp;
  576. reduction_performed = true;
  577. continue;
  578. }
  579. /*
  580. * When the mask is 0, can reduce the amount of num_after
  581. * bits by moving the initial num_after bits into the mask.
  582. */
  583. if (nodep->mask == 0) {
  584. assert(nodep->num_after != 0);
  585. assert(nodep->idx + MASK_BITS > nodep->idx);
  586. nodep->idx += MASK_BITS;
  587. if (nodep->num_after >= MASK_BITS) {
  588. nodep->mask = ~0;
  589. nodep->num_after -= MASK_BITS;
  590. } else {
  591. nodep->mask = (1u << nodep->num_after) - 1;
  592. nodep->num_after = 0;
  593. }
  594. reduction_performed = true;
  595. continue;
  596. }
  597. /*
  598. * 2) Potential reductions between the current and
  599. * previous nodes.
  600. */
  601. prev = node_prev(s, nodep);
  602. if (prev) {
  603. sparsebit_idx_t prev_highest_bit;
  604. /* Nodes with no bits set can be removed. */
  605. if (prev->mask == 0 && prev->num_after == 0) {
  606. node_rm(s, prev);
  607. reduction_performed = true;
  608. continue;
  609. }
  610. /*
  611. * All mask bits set and previous node has
  612. * adjacent index.
  613. */
  614. if (nodep->mask + 1 == 0 &&
  615. prev->idx + MASK_BITS == nodep->idx) {
  616. prev->num_after += MASK_BITS + nodep->num_after;
  617. nodep->mask = 0;
  618. nodep->num_after = 0;
  619. reduction_performed = true;
  620. continue;
  621. }
  622. /*
  623. * Is node adjacent to previous node and the node
  624. * contains a single contiguous range of bits
  625. * starting from the beginning of the mask?
  626. */
  627. prev_highest_bit = prev->idx + MASK_BITS - 1 + prev->num_after;
  628. if (prev_highest_bit + 1 == nodep->idx &&
  629. (nodep->mask | (nodep->mask >> 1)) == nodep->mask) {
  630. /*
  631. * How many contiguous bits are there?
  632. * Is equal to the total number of set
  633. * bits, due to an earlier check that
  634. * there is a single contiguous range of
  635. * set bits.
  636. */
  637. unsigned int num_contiguous
  638. = __builtin_popcount(nodep->mask);
  639. assert((num_contiguous > 0) &&
  640. ((1ULL << num_contiguous) - 1) == nodep->mask);
  641. prev->num_after += num_contiguous;
  642. nodep->mask = 0;
  643. /*
  644. * For predictable performance, handle special
  645. * case where all mask bits are set and there
  646. * is a non-zero num_after setting. This code
  647. * is functionally correct without the following
  648. * conditionalized statements, but without them
  649. * the value of num_after is only reduced by
  650. * the number of mask bits per pass. There are
  651. * cases where num_after can be close to 2^64.
  652. * Without this code it could take nearly
  653. * (2^64) / 32 passes to perform the full
  654. * reduction.
  655. */
  656. if (num_contiguous == MASK_BITS) {
  657. prev->num_after += nodep->num_after;
  658. nodep->num_after = 0;
  659. }
  660. reduction_performed = true;
  661. continue;
  662. }
  663. }
  664. /*
  665. * 3) Potential reductions between the current and
  666. * next nodes.
  667. */
  668. next = node_next(s, nodep);
  669. if (next) {
  670. /* Nodes with no bits set can be removed. */
  671. if (next->mask == 0 && next->num_after == 0) {
  672. node_rm(s, next);
  673. reduction_performed = true;
  674. continue;
  675. }
  676. /*
  677. * Is next node index adjacent to current node
  678. * and has a mask with all bits set?
  679. */
  680. if (next->idx == nodep->idx + MASK_BITS + nodep->num_after &&
  681. next->mask == ~(mask_t) 0) {
  682. nodep->num_after += MASK_BITS;
  683. next->mask = 0;
  684. nodep->num_after += next->num_after;
  685. next->num_after = 0;
  686. node_rm(s, next);
  687. next = NULL;
  688. reduction_performed = true;
  689. continue;
  690. }
  691. }
  692. } while (nodep && reduction_performed);
  693. }
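/*
 * Illustrative sketch (not part of the library): shows the
 * split-then-reduce pattern the comments above describe.  node_split()
 * may leave a full-mask node adjacent to its predecessor, temporarily
 * violating the invariants from the file header; node_reduce() merges
 * the pieces back together.  The SPARSEBIT_EXAMPLES guard is
 * hypothetical and never defined.
 */
#ifdef SPARSEBIT_EXAMPLES
static void node_split_reduce_example(void)
{
	struct sparsebit *s = sparsebit_alloc();
	struct node *nodep;

	/* Bits 0..127 set: one node with idx 0, a full mask, num_after 96. */
	sparsebit_set_num(s, 0, 128);

	/*
	 * Force a node boundary at bit 64.  The new node has all mask
	 * bits set while being adjacent to its predecessor.
	 */
	nodep = node_split(s, 64);
	assert(nodep->idx == 64 && nodep->mask == ~(mask_t) 0);

	/* node_reduce() folds everything back into a single node. */
	node_reduce(s, nodep);
	nodep = node_first(s);
	assert(nodep->idx == 0 && !node_next(s, nodep));

	sparsebit_free(&s);
}
#endif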
/* Returns whether the bit at the index given by idx within the
 * sparsebit array is set.
 */
  697. bool sparsebit_is_set(struct sparsebit *s, sparsebit_idx_t idx)
  698. {
  699. struct node *nodep;
  700. /* Find the node that describes the setting of the bit at idx */
  701. for (nodep = s->root; nodep;
  702. nodep = nodep->idx > idx ? nodep->left : nodep->right)
  703. if (idx >= nodep->idx &&
  704. idx <= nodep->idx + MASK_BITS + nodep->num_after - 1)
  705. goto have_node;
  706. return false;
  707. have_node:
  708. /* Bit is set if it is any of the bits described by num_after */
  709. if (nodep->num_after && idx >= nodep->idx + MASK_BITS)
  710. return true;
  711. /* Is the corresponding mask bit set */
  712. assert(idx >= nodep->idx && idx - nodep->idx < MASK_BITS);
  713. return !!(nodep->mask & (1 << (idx - nodep->idx)));
  714. }
  715. /* Within the sparsebit array pointed to by s, sets the bit
  716. * at the index given by idx.
  717. */
  718. static void bit_set(struct sparsebit *s, sparsebit_idx_t idx)
  719. {
  720. struct node *nodep;
  721. /* Skip bits that are already set */
  722. if (sparsebit_is_set(s, idx))
  723. return;
  724. /*
  725. * Get a node where the bit at idx is described by the mask.
  726. * The node_split will also create a node, if there isn't
  727. * already a node that describes the setting of bit.
  728. */
  729. nodep = node_split(s, idx & -MASK_BITS);
  730. /* Set the bit within the nodes mask */
  731. assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1);
  732. assert(!(nodep->mask & (1 << (idx - nodep->idx))));
  733. nodep->mask |= 1 << (idx - nodep->idx);
  734. s->num_set++;
  735. node_reduce(s, nodep);
  736. }
  737. /* Within the sparsebit array pointed to by s, clears the bit
  738. * at the index given by idx.
  739. */
  740. static void bit_clear(struct sparsebit *s, sparsebit_idx_t idx)
  741. {
  742. struct node *nodep;
  743. /* Skip bits that are already cleared */
  744. if (!sparsebit_is_set(s, idx))
  745. return;
  746. /* Is there a node that describes the setting of this bit? */
  747. nodep = node_find(s, idx);
  748. if (!nodep)
  749. return;
  750. /*
  751. * If a num_after bit, split the node, so that the bit is
  752. * part of a node mask.
  753. */
  754. if (idx >= nodep->idx + MASK_BITS)
  755. nodep = node_split(s, idx & -MASK_BITS);
  756. /*
  757. * After node_split above, bit at idx should be within the mask.
  758. * Clear that bit.
  759. */
  760. assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1);
  761. assert(nodep->mask & (1 << (idx - nodep->idx)));
  762. nodep->mask &= ~(1 << (idx - nodep->idx));
  763. assert(s->num_set > 0 || sparsebit_all_set(s));
  764. s->num_set--;
  765. node_reduce(s, nodep);
  766. }
  767. /* Recursively dumps to the FILE stream given by stream the contents
  768. * of the sub-tree of nodes pointed to by nodep. Each line of output
  769. * is prefixed by the number of spaces given by indent. On each
  770. * recursion, the indent amount is increased by 2. This causes nodes
  771. * at each level deeper into the binary search tree to be displayed
  772. * with a greater indent.
  773. */
  774. static void dump_nodes(FILE *stream, struct node *nodep,
  775. unsigned int indent)
  776. {
  777. char *node_type;
  778. /* Dump contents of node */
  779. if (!nodep->parent)
  780. node_type = "root";
  781. else if (nodep == nodep->parent->left)
  782. node_type = "left";
  783. else {
  784. assert(nodep == nodep->parent->right);
  785. node_type = "right";
  786. }
  787. fprintf(stream, "%*s---- %s nodep: %p\n", indent, "", node_type, nodep);
  788. fprintf(stream, "%*s parent: %p left: %p right: %p\n", indent, "",
  789. nodep->parent, nodep->left, nodep->right);
  790. fprintf(stream, "%*s idx: 0x%lx mask: 0x%x num_after: 0x%lx\n",
  791. indent, "", nodep->idx, nodep->mask, nodep->num_after);
  792. /* If present, dump contents of left child nodes */
  793. if (nodep->left)
  794. dump_nodes(stream, nodep->left, indent + 2);
  795. /* If present, dump contents of right child nodes */
  796. if (nodep->right)
  797. dump_nodes(stream, nodep->right, indent + 2);
  798. }
  799. static inline sparsebit_idx_t node_first_set(struct node *nodep, int start)
  800. {
  801. mask_t leading = (mask_t)1 << start;
  802. int n1 = __builtin_ctz(nodep->mask & -leading);
  803. return nodep->idx + n1;
  804. }
  805. static inline sparsebit_idx_t node_first_clear(struct node *nodep, int start)
  806. {
  807. mask_t leading = (mask_t)1 << start;
  808. int n1 = __builtin_ctz(~nodep->mask & -leading);
  809. return nodep->idx + n1;
  810. }
  811. /* Dumps to the FILE stream specified by stream, the implementation dependent
  812. * internal state of s. Each line of output is prefixed with the number
  813. * of spaces given by indent. The output is completely implementation
  814. * dependent and subject to change. Output from this function should only
  815. * be used for diagnostic purposes. For example, this function can be
  816. * used by test cases after they detect an unexpected condition, as a means
  817. * to capture diagnostic information.
  818. */
  819. static void sparsebit_dump_internal(FILE *stream, struct sparsebit *s,
  820. unsigned int indent)
  821. {
  822. /* Dump the contents of s */
  823. fprintf(stream, "%*sroot: %p\n", indent, "", s->root);
  824. fprintf(stream, "%*snum_set: 0x%lx\n", indent, "", s->num_set);
  825. if (s->root)
  826. dump_nodes(stream, s->root, indent);
  827. }
  828. /* Allocates and returns a new sparsebit array. The initial state
  829. * of the newly allocated sparsebit array has all bits cleared.
  830. */
  831. struct sparsebit *sparsebit_alloc(void)
  832. {
  833. struct sparsebit *s;
  834. /* Allocate top level structure. */
  835. s = calloc(1, sizeof(*s));
  836. if (!s) {
  837. perror("calloc");
  838. abort();
  839. }
  840. return s;
  841. }
  842. /* Frees the implementation dependent data for the sparsebit array
  843. * pointed to by s and poisons the pointer to that data.
  844. */
  845. void sparsebit_free(struct sparsebit **sbitp)
  846. {
  847. struct sparsebit *s = *sbitp;
  848. if (!s)
  849. return;
  850. sparsebit_clear_all(s);
  851. free(s);
  852. *sbitp = NULL;
  853. }
/* Makes a copy of the sparsebit array given by s into the sparsebit
 * array given by d.  Note, d must have already been allocated via
 * sparsebit_alloc().  It may already have bits set; d is first cleared
 * and then set to match s.
 */
  859. void sparsebit_copy(struct sparsebit *d, struct sparsebit *s)
  860. {
  861. /* First clear any bits already set in the destination */
  862. sparsebit_clear_all(d);
  863. if (s->root) {
  864. d->root = node_copy_subtree(s->root);
  865. d->num_set = s->num_set;
  866. }
  867. }
  868. /* Returns whether num consecutive bits starting at idx are all set. */
  869. bool sparsebit_is_set_num(struct sparsebit *s,
  870. sparsebit_idx_t idx, sparsebit_num_t num)
  871. {
  872. sparsebit_idx_t next_cleared;
  873. assert(num > 0);
  874. assert(idx + num - 1 >= idx);
  875. /* With num > 0, the first bit must be set. */
  876. if (!sparsebit_is_set(s, idx))
  877. return false;
  878. /* Find the next cleared bit */
  879. next_cleared = sparsebit_next_clear(s, idx);
  880. /*
  881. * If no cleared bits beyond idx, then there are at least num
  882. * set bits. idx + num doesn't wrap. Otherwise check if
  883. * there are enough set bits between idx and the next cleared bit.
  884. */
  885. return next_cleared == 0 || next_cleared - idx >= num;
  886. }
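/*
 * Illustrative sketch (not part of the library): sparsebit_is_set_num()
 * only reports true when the entire range is set, as described above.
 * The SPARSEBIT_EXAMPLES guard is hypothetical and never defined.
 */
#ifdef SPARSEBIT_EXAMPLES
static void sparsebit_is_set_num_example(void)
{
	struct sparsebit *s = sparsebit_alloc();

	sparsebit_set_num(s, 0x40, 0x10);		/* bits 0x40..0x4f */

	assert(sparsebit_is_set_num(s, 0x40, 0x10));
	/* Range 0x48..0x57 runs past the last set bit (0x4f). */
	assert(!sparsebit_is_set_num(s, 0x48, 0x10));

	sparsebit_free(&s);
}
#endif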
/* Returns whether the bit at the index given by idx is cleared. */
  888. bool sparsebit_is_clear(struct sparsebit *s,
  889. sparsebit_idx_t idx)
  890. {
  891. return !sparsebit_is_set(s, idx);
  892. }
  893. /* Returns whether num consecutive bits starting at idx are all cleared. */
  894. bool sparsebit_is_clear_num(struct sparsebit *s,
  895. sparsebit_idx_t idx, sparsebit_num_t num)
  896. {
  897. sparsebit_idx_t next_set;
  898. assert(num > 0);
  899. assert(idx + num - 1 >= idx);
  900. /* With num > 0, the first bit must be cleared. */
  901. if (!sparsebit_is_clear(s, idx))
  902. return false;
  903. /* Find the next set bit */
  904. next_set = sparsebit_next_set(s, idx);
  905. /*
  906. * If no set bits beyond idx, then there are at least num
  907. * cleared bits. idx + num doesn't wrap. Otherwise check if
  908. * there are enough cleared bits between idx and the next set bit.
  909. */
  910. return next_set == 0 || next_set - idx >= num;
  911. }
  912. /* Returns the total number of bits set. Note: 0 is also returned for
  913. * the case of all bits set. This is because with all bits set, there
  914. * is 1 additional bit set beyond what can be represented in the return
  915. * value. Use sparsebit_any_set(), instead of sparsebit_num_set() > 0,
  916. * to determine if the sparsebit array has any bits set.
  917. */
  918. sparsebit_num_t sparsebit_num_set(struct sparsebit *s)
  919. {
  920. return s->num_set;
  921. }
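/*
 * Illustrative sketch (not part of the library): demonstrates the
 * num_set == 0 ambiguity noted in the comment above, where a value of
 * 0 can mean either "no bits set" or "all bits set".  The
 * SPARSEBIT_EXAMPLES guard is hypothetical and never defined.
 */
#ifdef SPARSEBIT_EXAMPLES
static void sparsebit_num_set_example(void)
{
	struct sparsebit *s = sparsebit_alloc();

	/* Freshly allocated: no bits set, counter is 0. */
	assert(sparsebit_num_set(s) == 0 && !sparsebit_any_set(s));

	/* All 2^64 bits set: the counter wraps back to 0. */
	sparsebit_set_all(s);
	assert(sparsebit_num_set(s) == 0 && sparsebit_any_set(s));
	assert(sparsebit_all_set(s));

	sparsebit_free(&s);
}
#endif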
  922. /* Returns whether any bit is set in the sparsebit array. */
  923. bool sparsebit_any_set(struct sparsebit *s)
  924. {
  925. /*
  926. * Nodes only describe set bits. If any nodes then there
  927. * is at least 1 bit set.
  928. */
  929. if (!s->root)
  930. return false;
  931. /*
  932. * Every node should have a non-zero mask. For now will
  933. * just assure that the root node has a non-zero mask,
  934. * which is a quick check that at least 1 bit is set.
  935. */
  936. assert(s->root->mask != 0);
  937. assert(s->num_set > 0 ||
  938. (s->root->num_after == ((sparsebit_num_t) 0) - MASK_BITS &&
  939. s->root->mask == ~(mask_t) 0));
  940. return true;
  941. }
  942. /* Returns whether all the bits in the sparsebit array are cleared. */
  943. bool sparsebit_all_clear(struct sparsebit *s)
  944. {
  945. return !sparsebit_any_set(s);
  946. }
/* Returns whether any bit in the sparsebit array is cleared. */
  948. bool sparsebit_any_clear(struct sparsebit *s)
  949. {
  950. return !sparsebit_all_set(s);
  951. }
/* Returns the index of the first set bit.  Aborts if no bits are set. */
  954. sparsebit_idx_t sparsebit_first_set(struct sparsebit *s)
  955. {
  956. struct node *nodep;
  957. /* Validate at least 1 bit is set */
  958. assert(sparsebit_any_set(s));
  959. nodep = node_first(s);
  960. return node_first_set(nodep, 0);
  961. }
/* Returns the index of the first cleared bit.  Aborts if
 * no bits are cleared.
 */
  965. sparsebit_idx_t sparsebit_first_clear(struct sparsebit *s)
  966. {
  967. struct node *nodep1, *nodep2;
  968. /* Validate at least 1 bit is cleared. */
  969. assert(sparsebit_any_clear(s));
  970. /* If no nodes or first node index > 0 then lowest cleared is 0 */
  971. nodep1 = node_first(s);
  972. if (!nodep1 || nodep1->idx > 0)
  973. return 0;
  974. /* Does the mask in the first node contain any cleared bits. */
  975. if (nodep1->mask != ~(mask_t) 0)
  976. return node_first_clear(nodep1, 0);
  977. /*
  978. * All mask bits set in first node. If there isn't a second node
  979. * then the first cleared bit is the first bit after the bits
  980. * described by the first node.
  981. */
  982. nodep2 = node_next(s, nodep1);
  983. if (!nodep2) {
  984. /*
  985. * No second node. First cleared bit is first bit beyond
  986. * bits described by first node.
  987. */
  988. assert(nodep1->mask == ~(mask_t) 0);
  989. assert(nodep1->idx + MASK_BITS + nodep1->num_after != (sparsebit_idx_t) 0);
  990. return nodep1->idx + MASK_BITS + nodep1->num_after;
  991. }
  992. /*
  993. * There is a second node.
  994. * If it is not adjacent to the first node, then there is a gap
  995. * of cleared bits between the nodes, and the first cleared bit
  996. * is the first bit within the gap.
  997. */
  998. if (nodep1->idx + MASK_BITS + nodep1->num_after != nodep2->idx)
  999. return nodep1->idx + MASK_BITS + nodep1->num_after;
  1000. /*
  1001. * Second node is adjacent to the first node.
  1002. * Because it is adjacent, its mask should be non-zero. If all
  1003. * its mask bits are set, then with it being adjacent, it should
  1004. * have had the mask bits moved into the num_after setting of the
  1005. * previous node.
  1006. */
  1007. return node_first_clear(nodep2, 0);
  1008. }
  1009. /* Returns index of next bit set within s after the index given by prev.
  1010. * Returns 0 if there are no bits after prev that are set.
  1011. */
  1012. sparsebit_idx_t sparsebit_next_set(struct sparsebit *s,
  1013. sparsebit_idx_t prev)
  1014. {
  1015. sparsebit_idx_t lowest_possible = prev + 1;
  1016. sparsebit_idx_t start;
  1017. struct node *nodep;
  1018. /* A bit after the highest index can't be set. */
  1019. if (lowest_possible == 0)
  1020. return 0;
  1021. /*
  1022. * Find the leftmost 'candidate' overlapping or to the right
  1023. * of lowest_possible.
  1024. */
  1025. struct node *candidate = NULL;
  1026. /* True iff lowest_possible is within candidate */
  1027. bool contains = false;
  1028. /*
  1029. * Find node that describes setting of bit at lowest_possible.
  1030. * If such a node doesn't exist, find the node with the lowest
  1031. * starting index that is > lowest_possible.
  1032. */
  1033. for (nodep = s->root; nodep;) {
  1034. if ((nodep->idx + MASK_BITS + nodep->num_after - 1)
  1035. >= lowest_possible) {
  1036. candidate = nodep;
  1037. if (candidate->idx <= lowest_possible) {
  1038. contains = true;
  1039. break;
  1040. }
  1041. nodep = nodep->left;
  1042. } else {
  1043. nodep = nodep->right;
  1044. }
  1045. }
  1046. if (!candidate)
  1047. return 0;
  1048. assert(candidate->mask != 0);
  1049. /* Does the candidate node describe the setting of lowest_possible? */
  1050. if (!contains) {
  1051. /*
  1052. * Candidate doesn't describe setting of bit at lowest_possible.
  1053. * Candidate points to the first node with a starting index
  1054. * > lowest_possible.
  1055. */
  1056. assert(candidate->idx > lowest_possible);
  1057. return node_first_set(candidate, 0);
  1058. }
  1059. /*
  1060. * Candidate describes setting of bit at lowest_possible.
  1061. * Note: although the node describes the setting of the bit
  1062. * at lowest_possible, its possible that its setting and the
  1063. * setting of all latter bits described by this node are 0.
  1064. * For now, just handle the cases where this node describes
  1065. * a bit at or after an index of lowest_possible that is set.
  1066. */
  1067. start = lowest_possible - candidate->idx;
  1068. if (start < MASK_BITS && candidate->mask >= (1 << start))
  1069. return node_first_set(candidate, start);
  1070. if (candidate->num_after) {
  1071. sparsebit_idx_t first_num_after_idx = candidate->idx + MASK_BITS;
  1072. return lowest_possible < first_num_after_idx
  1073. ? first_num_after_idx : lowest_possible;
  1074. }
  1075. /*
  1076. * Although candidate node describes setting of bit at
  1077. * the index of lowest_possible, all bits at that index and
  1078. * latter that are described by candidate are cleared. With
  1079. * this, the next bit is the first bit in the next node, if
  1080. * such a node exists. If a next node doesn't exist, then
  1081. * there is no next set bit.
  1082. */
  1083. candidate = node_next(s, candidate);
  1084. if (!candidate)
  1085. return 0;
  1086. return node_first_set(candidate, 0);
  1087. }
  1088. /* Returns index of next bit cleared within s after the index given by prev.
  1089. * Returns 0 if there are no bits after prev that are cleared.
  1090. */
  1091. sparsebit_idx_t sparsebit_next_clear(struct sparsebit *s,
  1092. sparsebit_idx_t prev)
  1093. {
  1094. sparsebit_idx_t lowest_possible = prev + 1;
  1095. sparsebit_idx_t idx;
  1096. struct node *nodep1, *nodep2;
/* There are no bits after the highest index. */
  1098. if (lowest_possible == 0)
  1099. return 0;
  1100. /*
  1101. * Does a node describing the setting of lowest_possible exist?
  1102. * If not, the bit at lowest_possible is cleared.
  1103. */
  1104. nodep1 = node_find(s, lowest_possible);
  1105. if (!nodep1)
  1106. return lowest_possible;
  1107. /* Does a mask bit in node 1 describe the next cleared bit. */
  1108. for (idx = lowest_possible - nodep1->idx; idx < MASK_BITS; idx++)
  1109. if (!(nodep1->mask & (1 << idx)))
  1110. return nodep1->idx + idx;
  1111. /*
  1112. * Next cleared bit is not described by node 1. If there
  1113. * isn't a next node, then next cleared bit is described
  1114. * by bit after the bits described by the first node.
  1115. */
  1116. nodep2 = node_next(s, nodep1);
  1117. if (!nodep2)
  1118. return nodep1->idx + MASK_BITS + nodep1->num_after;
  1119. /*
  1120. * There is a second node.
  1121. * If it is not adjacent to the first node, then there is a gap
  1122. * of cleared bits between the nodes, and the next cleared bit
  1123. * is the first bit within the gap.
  1124. */
  1125. if (nodep1->idx + MASK_BITS + nodep1->num_after != nodep2->idx)
  1126. return nodep1->idx + MASK_BITS + nodep1->num_after;
  1127. /*
  1128. * Second node is adjacent to the first node.
  1129. * Because it is adjacent, its mask should be non-zero. If all
  1130. * its mask bits are set, then with it being adjacent, it should
  1131. * have had the mask bits moved into the num_after setting of the
  1132. * previous node.
  1133. */
  1134. return node_first_clear(nodep2, 0);
  1135. }
  1136. /* Starting with the index 1 greater than the index given by start, finds
  1137. * and returns the index of the first sequence of num consecutively set
 * bits.  Returns a value of 0 if no such sequence exists.
  1139. */
  1140. sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *s,
  1141. sparsebit_idx_t start, sparsebit_num_t num)
  1142. {
  1143. sparsebit_idx_t idx;
  1144. assert(num >= 1);
  1145. for (idx = sparsebit_next_set(s, start);
  1146. idx != 0 && idx + num - 1 >= idx;
  1147. idx = sparsebit_next_set(s, idx)) {
  1148. assert(sparsebit_is_set(s, idx));
  1149. /*
  1150. * Does the sequence of bits starting at idx consist of
  1151. * num set bits?
  1152. */
  1153. if (sparsebit_is_set_num(s, idx, num))
  1154. return idx;
  1155. /*
  1156. * Sequence of set bits at idx isn't large enough.
  1157. * Skip this entire sequence of set bits.
  1158. */
  1159. idx = sparsebit_next_clear(s, idx);
  1160. if (idx == 0)
  1161. return 0;
  1162. }
  1163. return 0;
  1164. }
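/*
 * Illustrative sketch (not part of the library): finding the first run
 * of num consecutively set bits with sparsebit_next_set_num(), per the
 * comment above.  The SPARSEBIT_EXAMPLES guard is hypothetical and
 * never defined.
 */
#ifdef SPARSEBIT_EXAMPLES
static void sparsebit_next_set_num_example(void)
{
	struct sparsebit *s = sparsebit_alloc();

	sparsebit_set_num(s, 10, 3);	/* bits 10..12 */
	sparsebit_set_num(s, 100, 8);	/* bits 100..107 */

	/* The run at 10 is too short; the first run of >= 5 bits starts at 100. */
	assert(sparsebit_next_set_num(s, 0, 5) == 100);

	sparsebit_free(&s);
}
#endif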
  1165. /* Starting with the index 1 greater than the index given by start, finds
  1166. * and returns the index of the first sequence of num consecutively cleared
 * bits.  Returns a value of 0 if no such sequence exists.
  1168. */
  1169. sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *s,
  1170. sparsebit_idx_t start, sparsebit_num_t num)
  1171. {
  1172. sparsebit_idx_t idx;
  1173. assert(num >= 1);
  1174. for (idx = sparsebit_next_clear(s, start);
  1175. idx != 0 && idx + num - 1 >= idx;
  1176. idx = sparsebit_next_clear(s, idx)) {
  1177. assert(sparsebit_is_clear(s, idx));
  1178. /*
  1179. * Does the sequence of bits starting at idx consist of
  1180. * num cleared bits?
  1181. */
  1182. if (sparsebit_is_clear_num(s, idx, num))
  1183. return idx;
  1184. /*
  1185. * Sequence of cleared bits at idx isn't large enough.
  1186. * Skip this entire sequence of cleared bits.
  1187. */
  1188. idx = sparsebit_next_set(s, idx);
  1189. if (idx == 0)
  1190. return 0;
  1191. }
  1192. return 0;
  1193. }
/* Sets the bits in the inclusive range idx through idx + num - 1. */
  1195. void sparsebit_set_num(struct sparsebit *s,
  1196. sparsebit_idx_t start, sparsebit_num_t num)
  1197. {
  1198. struct node *nodep, *next;
  1199. unsigned int n1;
  1200. sparsebit_idx_t idx;
  1201. sparsebit_num_t n;
  1202. sparsebit_idx_t middle_start, middle_end;
  1203. assert(num > 0);
  1204. assert(start + num - 1 >= start);
  1205. /*
  1206. * Leading - bits before first mask boundary.
  1207. *
  1208. * TODO(lhuemill): With some effort it may be possible to
  1209. * replace the following loop with a sequential sequence
  1210. * of statements. High level sequence would be:
  1211. *
  1212. * 1. Use node_split() to force node that describes setting
  1213. * of idx to be within the mask portion of a node.
  1214. * 2. Form mask of bits to be set.
  1215. * 3. Determine number of mask bits already set in the node
  1216. * and store in a local variable named num_already_set.
  1217. * 4. Set the appropriate mask bits within the node.
  1218. * 5. Increment struct sparsebit_pvt num_set member
  1219. * by the number of bits that were actually set.
  1220. * Exclude from the counts bits that were already set.
  1221. * 6. Before returning to the caller, use node_reduce() to
  1222. * handle the multiple corner cases that this method
  1223. * introduces.
  1224. */
  1225. for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--)
  1226. bit_set(s, idx);
  1227. /* Middle - bits spanning one or more entire mask */
  1228. middle_start = idx;
  1229. middle_end = middle_start + (n & -MASK_BITS) - 1;
  1230. if (n >= MASK_BITS) {
  1231. nodep = node_split(s, middle_start);
  1232. /*
  1233. * As needed, split just after end of middle bits.
  1234. * No split needed if end of middle bits is at highest
  1235. * supported bit index.
  1236. */
  1237. if (middle_end + 1 > middle_end)
  1238. (void) node_split(s, middle_end + 1);
  1239. /* Delete nodes that only describe bits within the middle. */
  1240. for (next = node_next(s, nodep);
  1241. next && (next->idx < middle_end);
  1242. next = node_next(s, nodep)) {
  1243. assert(next->idx + MASK_BITS + next->num_after - 1 <= middle_end);
  1244. node_rm(s, next);
  1245. next = NULL;
  1246. }
  1247. /* As needed set each of the mask bits */
  1248. for (n1 = 0; n1 < MASK_BITS; n1++) {
  1249. if (!(nodep->mask & (1 << n1))) {
  1250. nodep->mask |= 1 << n1;
  1251. s->num_set++;
  1252. }
  1253. }
  1254. s->num_set -= nodep->num_after;
  1255. nodep->num_after = middle_end - middle_start + 1 - MASK_BITS;
  1256. s->num_set += nodep->num_after;
  1257. node_reduce(s, nodep);
  1258. }
  1259. idx = middle_end + 1;
  1260. n -= middle_end - middle_start + 1;
  1261. /* Trailing - bits at and beyond last mask boundary */
  1262. assert(n < MASK_BITS);
  1263. for (; n > 0; idx++, n--)
  1264. bit_set(s, idx);
  1265. }
/* Clears the bits in the inclusive range idx through idx + num - 1. */
void sparsebit_clear_num(struct sparsebit *s,
	sparsebit_idx_t start, sparsebit_num_t num)
{
	struct node *nodep, *next;
	unsigned int n1;
	sparsebit_idx_t idx;
	sparsebit_num_t n;
	sparsebit_idx_t middle_start, middle_end;

	assert(num > 0);
	assert(start + num - 1 >= start);

	/* Leading - bits before first mask boundary */
	for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--)
		bit_clear(s, idx);

	/* Middle - bits spanning one or more entire mask */
	middle_start = idx;
	middle_end = middle_start + (n & -MASK_BITS) - 1;
	if (n >= MASK_BITS) {
		nodep = node_split(s, middle_start);

		/*
		 * As needed, split just after end of middle bits.
		 * No split needed if end of middle bits is at highest
		 * supported bit index.
		 */
		if (middle_end + 1 > middle_end)
			(void) node_split(s, middle_end + 1);

		/* Delete nodes that only describe bits within the middle. */
		for (next = node_next(s, nodep);
			next && (next->idx < middle_end);
			next = node_next(s, nodep)) {
			assert(next->idx + MASK_BITS + next->num_after - 1 <= middle_end);
			node_rm(s, next);
			next = NULL;
		}

		/* As needed clear each of the mask bits */
		for (n1 = 0; n1 < MASK_BITS; n1++) {
			if (nodep->mask & (1 << n1)) {
				nodep->mask &= ~(1 << n1);
				s->num_set--;
			}
		}

		/* Clear any bits described by num_after */
		s->num_set -= nodep->num_after;
		nodep->num_after = 0;

		/*
		 * Delete the node that describes the beginning of
		 * the middle bits and perform any allowed reductions
		 * with the nodes prev or next of nodep.
		 */
		node_reduce(s, nodep);
		nodep = NULL;
	}
	idx = middle_end + 1;
	n -= middle_end - middle_start + 1;

	/* Trailing - bits at and beyond last mask boundary */
	assert(n < MASK_BITS);
	for (; n > 0; idx++, n--)
		bit_clear(s, idx);
}

/* Sets the bit at the index given by idx. */
void sparsebit_set(struct sparsebit *s, sparsebit_idx_t idx)
{
	sparsebit_set_num(s, idx, 1);
}

/* Clears the bit at the index given by idx. */
void sparsebit_clear(struct sparsebit *s, sparsebit_idx_t idx)
{
	sparsebit_clear_num(s, idx, 1);
}

/* Sets the bits in the entire addressable range of the sparsebit array. */
void sparsebit_set_all(struct sparsebit *s)
{
	sparsebit_set(s, 0);
	sparsebit_set_num(s, 1, ~(sparsebit_idx_t) 0);
	assert(sparsebit_all_set(s));
}

/* Clears the bits in the entire addressable range of the sparsebit array. */
void sparsebit_clear_all(struct sparsebit *s)
{
	sparsebit_clear(s, 0);
	sparsebit_clear_num(s, 1, ~(sparsebit_idx_t) 0);
	assert(!sparsebit_any_set(s));
}
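
/*
 * The two *_all() helpers above cover the full index space in two steps:
 * bit 0 is handled on its own, and indexes 1 through ~(sparsebit_idx_t) 0
 * are handled with a single ranged call.  A single call starting at index 0
 * is avoided, presumably because the whole range contains one more bit than
 * the largest value a num argument can carry.
 */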

static size_t display_range(FILE *stream, sparsebit_idx_t low,
	sparsebit_idx_t high, bool prepend_comma_space)
{
	char *fmt_str;
	size_t sz;

	/* Determine the printf format string */
	if (low == high)
		fmt_str = prepend_comma_space ? ", 0x%lx" : "0x%lx";
	else
		fmt_str = prepend_comma_space ? ", 0x%lx:0x%lx" : "0x%lx:0x%lx";

	/*
	 * When stream is NULL, just determine the size of what would
	 * have been printed, else print the range.
	 */
	if (!stream)
		sz = snprintf(NULL, 0, fmt_str, low, high);
	else
		sz = fprintf(stream, fmt_str, low, high);

	return sz;
}

/* Dumps to the FILE stream given by stream, the bit settings
 * of s.  Each line of output is prefixed with the number of
 * spaces given by indent.  The length of each line is implementation
 * dependent and does not depend on the indent amount.  The following
 * is an example output of a sparsebit array that has bits:
 *
 *   0x5, 0x8, 0xa:0xe, 0x12
 *
 * This corresponds to a sparsebit whose bits 5, 8, 10, 11, 12, 13, 14, 18
 * are set.  Note that a ':', instead of a '-', is used to specify a range
 * of contiguous bits.  This is done because '-' is used to specify
 * command-line options, and sometimes ranges are specified as command-line
 * arguments.
 */
void sparsebit_dump(FILE *stream, struct sparsebit *s,
	unsigned int indent)
{
	size_t current_line_len = 0;
	size_t sz;
	struct node *nodep;

	if (!sparsebit_any_set(s))
		return;

	/* Display initial indent */
	fprintf(stream, "%*s", indent, "");

	/* For each node */
	for (nodep = node_first(s); nodep; nodep = node_next(s, nodep)) {
		unsigned int n1;
		sparsebit_idx_t low, high;

		/* For each group of bits in the mask */
		for (n1 = 0; n1 < MASK_BITS; n1++) {
			if (nodep->mask & (1 << n1)) {
				low = high = nodep->idx + n1;
				for (; n1 < MASK_BITS; n1++) {
					if (nodep->mask & (1 << n1))
						high = nodep->idx + n1;
					else
						break;
				}

				if ((n1 == MASK_BITS) && nodep->num_after)
					high += nodep->num_after;

				/*
				 * How much room will it take to display
				 * this range.
				 */
				sz = display_range(NULL, low, high,
					current_line_len != 0);

				/*
				 * If there is not enough room, display
				 * a newline plus the indent of the next
				 * line.
				 */
				if (current_line_len + sz > DUMP_LINE_MAX) {
					fputs("\n", stream);
					fprintf(stream, "%*s", indent, "");
					current_line_len = 0;
				}

				/* Display the range */
				sz = display_range(stream, low, high,
					current_line_len != 0);
				current_line_len += sz;
			}
		}

		/*
		 * If num_after and most significant-bit of mask is not
		 * set, then still need to display a range for the bits
		 * described by num_after.
		 */
		if (!(nodep->mask & (1 << (MASK_BITS - 1))) && nodep->num_after) {
			low = nodep->idx + MASK_BITS;
			high = nodep->idx + MASK_BITS + nodep->num_after - 1;

			/*
			 * How much room will it take to display
			 * this range.
			 */
			sz = display_range(NULL, low, high,
				current_line_len != 0);

			/*
			 * If there is not enough room, display
			 * a newline plus the indent of the next
			 * line.
			 */
			if (current_line_len + sz > DUMP_LINE_MAX) {
				fputs("\n", stream);
				fprintf(stream, "%*s", indent, "");
				current_line_len = 0;
			}

			/* Display the range */
			sz = display_range(stream, low, high,
				current_line_len != 0);
			current_line_len += sz;
		}
	}
	fputs("\n", stream);
}
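
/*
 * Worked example for sparsebit_dump(), matching the bits listed in the
 * comment above the function (the calls are illustrative):
 *
 *	sparsebit_set(s, 0x5);
 *	sparsebit_set(s, 0x8);
 *	sparsebit_set_num(s, 0xa, 5);
 *	sparsebit_set(s, 0x12);
 *	sparsebit_dump(stdout, s, 4);
 *
 * should print a single line, indented by four spaces:
 *
 *	    0x5, 0x8, 0xa:0xe, 0x12
 */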

/* Validates the internal state of the sparsebit array given by
 * s.  On error, diagnostic information is printed to stderr and
 * abort is called.
 */
void sparsebit_validate_internal(struct sparsebit *s)
{
	bool error_detected = false;
	struct node *nodep, *prev = NULL;
	sparsebit_num_t total_bits_set = 0;
	unsigned int n1;

	/* For each node */
	for (nodep = node_first(s); nodep;
		prev = nodep, nodep = node_next(s, nodep)) {

		/*
		 * Increase total bits set by the number of bits set
		 * in this node.
		 */
		for (n1 = 0; n1 < MASK_BITS; n1++)
			if (nodep->mask & (1 << n1))
				total_bits_set++;

		total_bits_set += nodep->num_after;

		/*
		 * Arbitrary choice as to whether a mask of 0 is allowed
		 * or not.  For diagnostic purposes it is beneficial to
		 * have only one valid means to represent a set of bits.
		 * To support this an arbitrary choice has been made
		 * to not allow a mask of zero.
		 */
		if (nodep->mask == 0) {
			fprintf(stderr, "Node mask of zero, "
				"nodep: %p nodep->mask: 0x%x",
				nodep, nodep->mask);
			error_detected = true;
			break;
		}

		/*
		 * Validate num_after is not greater than the max index
		 * - the number of mask bits.  The num_after member
		 * uses 0-based indexing and thus has no value that
		 * represents all bits set.  This limitation is handled
		 * by requiring a non-zero mask.  With a non-zero mask,
		 * MASK_BITS worth of bits are described by the mask,
		 * which makes the largest needed num_after equal to:
		 *
		 *    (~(sparsebit_num_t) 0) - MASK_BITS + 1
		 */
		if (nodep->num_after
			> (~(sparsebit_num_t) 0) - MASK_BITS + 1) {
			fprintf(stderr, "num_after too large, "
				"nodep: %p nodep->num_after: 0x%lx",
				nodep, nodep->num_after);
			error_detected = true;
			break;
		}

		/* Validate node index is divisible by the mask size */
		if (nodep->idx % MASK_BITS) {
			fprintf(stderr, "Node index not divisible by "
				"mask size,\n"
				"  nodep: %p nodep->idx: 0x%lx "
				"MASK_BITS: %lu\n",
				nodep, nodep->idx, MASK_BITS);
			error_detected = true;
			break;
		}

		/*
		 * Validate bits described by node don't wrap beyond the
		 * highest supported index.
		 */
		if ((nodep->idx + MASK_BITS + nodep->num_after - 1) < nodep->idx) {
			fprintf(stderr, "Bits described by node wrap "
				"beyond highest supported index,\n"
				"  nodep: %p nodep->idx: 0x%lx\n"
				"  MASK_BITS: %lu nodep->num_after: 0x%lx",
				nodep, nodep->idx, MASK_BITS, nodep->num_after);
			error_detected = true;
			break;
		}

		/* Check parent pointers. */
		if (nodep->left) {
			if (nodep->left->parent != nodep) {
				fprintf(stderr, "Left child parent pointer "
					"doesn't point to this node,\n"
					"  nodep: %p nodep->left: %p "
					"nodep->left->parent: %p",
					nodep, nodep->left,
					nodep->left->parent);
				error_detected = true;
				break;
			}
		}

		if (nodep->right) {
			if (nodep->right->parent != nodep) {
				fprintf(stderr, "Right child parent pointer "
					"doesn't point to this node,\n"
					"  nodep: %p nodep->right: %p "
					"nodep->right->parent: %p",
					nodep, nodep->right,
					nodep->right->parent);
				error_detected = true;
				break;
			}
		}

		if (!nodep->parent) {
			if (s->root != nodep) {
				fprintf(stderr, "Unexpected root node, "
					"s->root: %p nodep: %p",
					s->root, nodep);
				error_detected = true;
				break;
			}
		}

		if (prev) {
			/*
			 * Is index of previous node before index of
			 * current node?
			 */
			if (prev->idx >= nodep->idx) {
				fprintf(stderr, "Previous node index "
					">= current node index,\n"
					"  prev: %p prev->idx: 0x%lx\n"
					"  nodep: %p nodep->idx: 0x%lx",
					prev, prev->idx, nodep, nodep->idx);
				error_detected = true;
				break;
			}

			/*
			 * Nodes occur in ascending order, based on each
			 * node's starting index.
			 */
			if ((prev->idx + MASK_BITS + prev->num_after - 1)
				>= nodep->idx) {
				fprintf(stderr, "Previous node bit range "
					"overlap with current node bit range,\n"
					"  prev: %p prev->idx: 0x%lx "
					"prev->num_after: 0x%lx\n"
					"  nodep: %p nodep->idx: 0x%lx "
					"nodep->num_after: 0x%lx\n"
					"  MASK_BITS: %lu",
					prev, prev->idx, prev->num_after,
					nodep, nodep->idx, nodep->num_after,
					MASK_BITS);
				error_detected = true;
				break;
			}

			/*
			 * When the node has all mask bits set, it shouldn't
			 * be adjacent to the last bit described by the
			 * previous node.
			 */
			if (nodep->mask == ~(mask_t) 0 &&
			    prev->idx + MASK_BITS + prev->num_after == nodep->idx) {
				fprintf(stderr, "Current node has mask with "
					"all bits set and is adjacent to the "
					"previous node,\n"
					"  prev: %p prev->idx: 0x%lx "
					"prev->num_after: 0x%lx\n"
					"  nodep: %p nodep->idx: 0x%lx "
					"nodep->num_after: 0x%lx\n"
					"  MASK_BITS: %lu",
					prev, prev->idx, prev->num_after,
					nodep, nodep->idx, nodep->num_after,
					MASK_BITS);
				error_detected = true;
				break;
			}
		}
	}

	if (!error_detected) {
		/*
		 * Is the sum of bits set in each node equal to the count
		 * of total bits set?
		 */
		if (s->num_set != total_bits_set) {
			fprintf(stderr, "Number of bits set mismatch,\n"
				"  s->num_set: 0x%lx total_bits_set: 0x%lx",
				s->num_set, total_bits_set);
			error_detected = true;
		}
	}

	if (error_detected) {
		fputs("  dump_internal:\n", stderr);
		sparsebit_dump_internal(stderr, s, 4);
		abort();
	}
}
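
/*
 * The fuzzing driver below invokes sparsebit_validate_internal() (opcode 11),
 * so fuzz-generated operation sequences also run these consistency checks
 * after arbitrary mixes of set and clear operations.
 */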

#ifdef FUZZ
/* A simple but effective fuzzing driver.  Look for bugs with the help
 * of some invariants and of a trivial representation of sparsebit.
 * Just use 512 bytes of /dev/zero and /dev/urandom as inputs, and let
 * afl-fuzz do the magic. :)
 */
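
/*
 * One possible way to build and seed the harness (illustrative; the output
 * names are made up and the include path depends on the surrounding tree):
 *
 *	afl-gcc -DFUZZ -I<include path> sparsebit.c -o sparsebit-fuzz
 *	mkdir in
 *	dd if=/dev/zero    of=in/zero bs=512 count=1
 *	dd if=/dev/urandom of=in/rand bs=512 count=1
 *	afl-fuzz -i in -o out ./sparsebit-fuzz
 */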

#include <stdlib.h>

struct range {
	sparsebit_idx_t first, last;
	bool set;
};

struct sparsebit *s;
struct range ranges[1000];
int num_ranges;

static bool get_value(sparsebit_idx_t idx)
{
	int i;

	for (i = num_ranges; --i >= 0; )
		if (ranges[i].first <= idx && idx <= ranges[i].last)
			return ranges[i].set;

	return false;
}
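
/*
 * get_value() scans the recorded ranges newest-first, so the most recent
 * set/clear covering idx wins -- a deliberately simple reference model that
 * the sparsebit results are checked against.
 */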

static void operate(int code, sparsebit_idx_t first, sparsebit_idx_t last)
{
	sparsebit_num_t num;
	sparsebit_idx_t next;

	if (first < last) {
		num = last - first + 1;
	} else {
		num = first - last + 1;
		first = last;
		last = first + num - 1;
	}
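
	/*
	 * The operands above arrive in arbitrary order and are normalized so
	 * that first <= last and num is the size of the inclusive range.
	 */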

	switch (code) {
	case 0:
		sparsebit_set(s, first);
		assert(sparsebit_is_set(s, first));
		assert(!sparsebit_is_clear(s, first));
		assert(sparsebit_any_set(s));
		assert(!sparsebit_all_clear(s));
		if (get_value(first))
			return;
		if (num_ranges == 1000)
			exit(0);
		ranges[num_ranges++] = (struct range)
			{ .first = first, .last = first, .set = true };
		break;
	case 1:
		sparsebit_clear(s, first);
		assert(!sparsebit_is_set(s, first));
		assert(sparsebit_is_clear(s, first));
		assert(sparsebit_any_clear(s));
		assert(!sparsebit_all_set(s));
		if (!get_value(first))
			return;
		if (num_ranges == 1000)
			exit(0);
		ranges[num_ranges++] = (struct range)
			{ .first = first, .last = first, .set = false };
		break;
	case 2:
		assert(sparsebit_is_set(s, first) == get_value(first));
		assert(sparsebit_is_clear(s, first) == !get_value(first));
		break;
	case 3:
		if (sparsebit_any_set(s))
			assert(get_value(sparsebit_first_set(s)));
		if (sparsebit_any_clear(s))
			assert(!get_value(sparsebit_first_clear(s)));
		sparsebit_set_all(s);
		assert(!sparsebit_any_clear(s));
		assert(sparsebit_all_set(s));
		num_ranges = 0;
		ranges[num_ranges++] = (struct range)
			{ .first = 0, .last = ~(sparsebit_idx_t)0, .set = true };
		break;
	case 4:
		if (sparsebit_any_set(s))
			assert(get_value(sparsebit_first_set(s)));
		if (sparsebit_any_clear(s))
			assert(!get_value(sparsebit_first_clear(s)));
		sparsebit_clear_all(s);
		assert(!sparsebit_any_set(s));
		assert(sparsebit_all_clear(s));
		num_ranges = 0;
		break;
	case 5:
		next = sparsebit_next_set(s, first);
		assert(next == 0 || next > first);
		assert(next == 0 || get_value(next));
		break;
	case 6:
		next = sparsebit_next_clear(s, first);
		assert(next == 0 || next > first);
		assert(next == 0 || !get_value(next));
		break;
	case 7:
		next = sparsebit_next_clear(s, first);
		if (sparsebit_is_set_num(s, first, num)) {
			assert(next == 0 || next > last);
			if (first)
				next = sparsebit_next_set(s, first - 1);
			else if (sparsebit_any_set(s))
				next = sparsebit_first_set(s);
			else
				return;
			assert(next == first);
		} else {
			assert(sparsebit_is_clear(s, first) || next <= last);
		}
		break;
	case 8:
		next = sparsebit_next_set(s, first);
		if (sparsebit_is_clear_num(s, first, num)) {
			assert(next == 0 || next > last);
			if (first)
				next = sparsebit_next_clear(s, first - 1);
			else if (sparsebit_any_clear(s))
				next = sparsebit_first_clear(s);
			else
				return;
			assert(next == first);
		} else {
			assert(sparsebit_is_set(s, first) || next <= last);
		}
		break;
	case 9:
		sparsebit_set_num(s, first, num);
		assert(sparsebit_is_set_num(s, first, num));
		assert(!sparsebit_is_clear_num(s, first, num));
		assert(sparsebit_any_set(s));
		assert(!sparsebit_all_clear(s));
		if (num_ranges == 1000)
			exit(0);
		ranges[num_ranges++] = (struct range)
			{ .first = first, .last = last, .set = true };
		break;
	case 10:
		sparsebit_clear_num(s, first, num);
		assert(!sparsebit_is_set_num(s, first, num));
		assert(sparsebit_is_clear_num(s, first, num));
		assert(sparsebit_any_clear(s));
		assert(!sparsebit_all_set(s));
		if (num_ranges == 1000)
			exit(0);
		ranges[num_ranges++] = (struct range)
			{ .first = first, .last = last, .set = false };
		break;
	case 11:
		sparsebit_validate_internal(s);
		break;
	default:
		break;
	}
}
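
/*
 * Opcode summary for operate() above: 0 = set bit, 1 = clear bit, 2 = check
 * one bit against the model, 3 = set_all, 4 = clear_all, 5 = next_set,
 * 6 = next_clear, 7 = next_clear/is_set_num consistency, 8 = next_set/
 * is_clear_num consistency, 9 = set_num, 10 = clear_num, 11 = validate.
 */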

unsigned char get8(void)
{
	int ch;

	ch = getchar();
	if (ch == EOF)
		exit(0);
	return ch;
}

uint64_t get64(void)
{
	uint64_t x;

	x = get8();
	x = (x << 8) | get8();
	x = (x << 8) | get8();
	x = (x << 8) | get8();
	x = (x << 8) | get8();
	x = (x << 8) | get8();
	x = (x << 8) | get8();
	return (x << 8) | get8();
}
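
/*
 * get8() and get64() above consume the fuzz input from stdin: get8() ends
 * the run cleanly at EOF, and get64() assembles eight bytes, most
 * significant first, into a 64-bit operand.  Each iteration of the loop in
 * main() therefore consumes 17 bytes (one opcode byte plus two operands),
 * so a 512-byte seed file drives roughly 30 operations.
 */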

int main(void)
{
	s = sparsebit_alloc();
	for (;;) {
		uint8_t op = get8() & 0xf;
		uint64_t first = get64();
		uint64_t last = get64();

		operate(op, first, last);
	}
}
#endif