ebitmap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the extensible bitmap type.
 *
 * Author : Stephen Smalley, <[email protected]>
 */
/*
 * Updated: Hewlett-Packard <[email protected]>
 *
 *	Added support to import/export the NetLabel category bitmap
 *
 * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
 */
/*
 * Updated: KaiGai Kohei <[email protected]>
 *	Applied standard bit operations to improve bitmap scanning.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <net/netlabel.h>

#include "ebitmap.h"
#include "policydb.h"

#define BITS_PER_U64	(sizeof(u64) * 8)

static struct kmem_cache *ebitmap_node_cachep __ro_after_init;
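
/*
 * An ebitmap is a singly linked list of ebitmap_node structures.  Each
 * node covers EBITMAP_SIZE bits starting at node->startbit and stores
 * them in node->maps[]; e->highbit is one past the range covered by the
 * last node (0 for an empty bitmap).
 */

/*
 * ebitmap_cmp - return 1 if e1 and e2 contain exactly the same bits,
 * 0 otherwise.  The bitmaps are equal when their highbits match and
 * their node lists pair up with identical startbits and map words.
 */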
int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2)
{
	const struct ebitmap_node *n1, *n2;

	if (e1->highbit != e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;
	while (n1 && n2 &&
	       (n1->startbit == n2->startbit) &&
	       !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
		n1 = n1->next;
		n2 = n2->next;
	}

	if (n1 || n2)
		return 0;

	return 1;
}
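
/*
 * ebitmap_cpy - deep-copy src into dst, allocating a fresh node for
 * every node of src.  On allocation failure dst is destroyed and
 * -ENOMEM is returned.
 */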
int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src)
{
	struct ebitmap_node *new, *prev;
	const struct ebitmap_node *n;

	ebitmap_init(dst);
	n = src->node;
	prev = NULL;
	while (n) {
		new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
		if (!new) {
			ebitmap_destroy(dst);
			return -ENOMEM;
		}
		new->startbit = n->startbit;
		memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
		new->next = NULL;
		if (prev)
			prev->next = new;
		else
			dst->node = new;
		prev = new;
		n = n->next;
	}

	dst->highbit = src->highbit;
	return 0;
}
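
/*
 * ebitmap_and - set dst to the intersection of e1 and e2 by walking the
 * positive bits of e1 and keeping those that are also set in e2.
 */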
int ebitmap_and(struct ebitmap *dst, const struct ebitmap *e1, const struct ebitmap *e2)
{
	struct ebitmap_node *n;
	int bit, rc;

	ebitmap_init(dst);

	ebitmap_for_each_positive_bit(e1, n, bit) {
		if (ebitmap_get_bit(e2, bit)) {
			rc = ebitmap_set_bit(dst, bit, 1);
			if (rc < 0)
				return rc;
		}
	}
	return 0;
}

#ifdef CONFIG_NETLABEL
/**
 * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
 * @ebmap: the ebitmap to export
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Export a SELinux extensible bitmap into a NetLabel category bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_export(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap **catmap)
{
	struct ebitmap_node *e_iter = ebmap->node;
	unsigned long e_map;
	u32 offset;
	unsigned int iter;
	int rc;

	if (e_iter == NULL) {
		*catmap = NULL;
		return 0;
	}

	if (*catmap != NULL)
		netlbl_catmap_free(*catmap);
	*catmap = NULL;

	while (e_iter) {
		offset = e_iter->startbit;
		for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
			e_map = e_iter->maps[iter];
			if (e_map != 0) {
				rc = netlbl_catmap_setlong(catmap,
							   offset,
							   e_map,
							   GFP_ATOMIC);
				if (rc != 0)
					goto netlbl_export_failure;
			}
			offset += EBITMAP_UNIT_SIZE;
		}
		e_iter = e_iter->next;
	}

	return 0;

netlbl_export_failure:
	netlbl_catmap_free(*catmap);
	return -ENOMEM;
}

/**
 * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
 * @ebmap: the ebitmap to import
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Import a NetLabel category bitmap into a SELinux extensible bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_import(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap *catmap)
{
	int rc;
	struct ebitmap_node *e_iter = NULL;
	struct ebitmap_node *e_prev = NULL;
	u32 offset = 0, idx;
	unsigned long bitmap;

	for (;;) {
		rc = netlbl_catmap_getlong(catmap, &offset, &bitmap);
		if (rc < 0)
			goto netlbl_import_failure;
		if (offset == (u32)-1)
			return 0;

		/* don't waste ebitmap space if the netlabel bitmap is empty */
		if (bitmap == 0) {
			offset += EBITMAP_UNIT_SIZE;
			continue;
		}

		if (e_iter == NULL ||
		    offset >= e_iter->startbit + EBITMAP_SIZE) {
			e_prev = e_iter;
			e_iter = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
			if (e_iter == NULL)
				goto netlbl_import_failure;
			e_iter->startbit = offset - (offset % EBITMAP_SIZE);
			if (e_prev == NULL)
				ebmap->node = e_iter;
			else
				e_prev->next = e_iter;
			ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
		}

		/* offset will always be aligned to an unsigned long */
		idx = EBITMAP_NODE_INDEX(e_iter, offset);
		e_iter->maps[idx] = bitmap;

		/* next */
		offset += EBITMAP_UNIT_SIZE;
	}

	/* NOTE: we should never reach this return */
	return 0;

netlbl_import_failure:
	ebitmap_destroy(ebmap);
	return -ENOMEM;
}
#endif /* CONFIG_NETLABEL */

/*
 * Check to see if all the bits set in e2 are also set in e1. Optionally,
 * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
 * last_e2bit.
 */
int ebitmap_contains(const struct ebitmap *e1, const struct ebitmap *e2, u32 last_e2bit)
{
	const struct ebitmap_node *n1, *n2;
	int i;

	if (e1->highbit < e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;

	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
		if (n1->startbit < n2->startbit) {
			n1 = n1->next;
			continue;
		}
		for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
			i--;	/* Skip trailing NULL map entries */
		if (last_e2bit && (i >= 0)) {
			u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
					 __fls(n2->maps[i]);
			if (lastsetbit > last_e2bit)
				return 0;
		}

		while (i >= 0) {
			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
				return 0;
			i--;
		}

		n1 = n1->next;
		n2 = n2->next;
	}

	if (n2)
		return 0;

	return 1;
}
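
/*
 * ebitmap_get_bit - return the value of @bit in @e; bits beyond the
 * highest allocated node are implicitly 0.
 */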
int ebitmap_get_bit(const struct ebitmap *e, unsigned long bit)
{
	const struct ebitmap_node *n;

	if (e->highbit < bit)
		return 0;

	n = e->node;
	while (n && (n->startbit <= bit)) {
		if ((n->startbit + EBITMAP_SIZE) > bit)
			return ebitmap_node_get_bit(n, bit);
		n = n->next;
	}

	return 0;
}
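
/*
 * ebitmap_set_bit - set @bit in @e to @value, allocating a new node when
 * the bit falls outside the existing nodes and freeing a node that
 * becomes empty after its last set bit is cleared.
 */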
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
{
	struct ebitmap_node *n, *prev, *new;

	prev = NULL;
	n = e->node;
	while (n && n->startbit <= bit) {
		if ((n->startbit + EBITMAP_SIZE) > bit) {
			if (value) {
				ebitmap_node_set_bit(n, bit);
			} else {
				unsigned int s;

				ebitmap_node_clr_bit(n, bit);

				s = find_first_bit(n->maps, EBITMAP_SIZE);
				if (s < EBITMAP_SIZE)
					return 0;

				/* drop this node from the bitmap */
				if (!n->next) {
					/*
					 * this was the highest map
					 * within the bitmap
					 */
					if (prev)
						e->highbit = prev->startbit
							     + EBITMAP_SIZE;
					else
						e->highbit = 0;
				}
				if (prev)
					prev->next = n->next;
				else
					e->node = n->next;
				kmem_cache_free(ebitmap_node_cachep, n);
			}
			return 0;
		}
		prev = n;
		n = n->next;
	}

	if (!value)
		return 0;

	new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->startbit = bit - (bit % EBITMAP_SIZE);
	ebitmap_node_set_bit(new, bit);

	if (!n)
		/* this node will be the highest map within the bitmap */
		e->highbit = new->startbit + EBITMAP_SIZE;

	if (prev) {
		new->next = prev->next;
		prev->next = new;
	} else {
		new->next = e->node;
		e->node = new;
	}

	return 0;
}
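
/* ebitmap_destroy - free all nodes of @e and reset it to the empty bitmap. */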
void ebitmap_destroy(struct ebitmap *e)
{
	struct ebitmap_node *n, *temp;

	if (!e)
		return;

	n = e->node;
	while (n) {
		temp = n;
		n = n->next;
		kmem_cache_free(ebitmap_node_cachep, temp);
	}

	e->highbit = 0;
	e->node = NULL;
}
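
/*
 * ebitmap_read - read an ebitmap from the binary policy image @fp: a
 * (map unit size, highbit, unit count) header followed by
 * (startbit, 64-bit map) pairs in ascending startbit order.
 */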
int ebitmap_read(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n = NULL;
	u32 mapunit, count, startbit, index;
	__le32 ebitmap_start;
	u64 map;
	__le64 mapbits;
	__le32 buf[3];
	int rc, i;

	ebitmap_init(e);

	rc = next_entry(buf, fp, sizeof buf);
	if (rc < 0)
		goto out;

	mapunit = le32_to_cpu(buf[0]);
	e->highbit = le32_to_cpu(buf[1]);
	count = le32_to_cpu(buf[2]);

	if (mapunit != BITS_PER_U64) {
		pr_err("SELinux: ebitmap: map size %u does not "
		       "match my size %zd (high bit was %d)\n",
		       mapunit, BITS_PER_U64, e->highbit);
		goto bad;
	}

	/* round up e->highbit */
	e->highbit += EBITMAP_SIZE - 1;
	e->highbit -= (e->highbit % EBITMAP_SIZE);

	if (!e->highbit) {
		e->node = NULL;
		goto ok;
	}

	if (e->highbit && !count)
		goto bad;

	for (i = 0; i < count; i++) {
		rc = next_entry(&ebitmap_start, fp, sizeof(u32));
		if (rc < 0) {
			pr_err("SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		startbit = le32_to_cpu(ebitmap_start);

		if (startbit & (mapunit - 1)) {
			pr_err("SELinux: ebitmap start bit (%d) is "
			       "not a multiple of the map unit size (%u)\n",
			       startbit, mapunit);
			goto bad;
		}
		if (startbit > e->highbit - mapunit) {
			pr_err("SELinux: ebitmap start bit (%d) is "
			       "beyond the end of the bitmap (%u)\n",
			       startbit, (e->highbit - mapunit));
			goto bad;
		}

		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
			struct ebitmap_node *tmp;

			tmp = kmem_cache_zalloc(ebitmap_node_cachep, GFP_KERNEL);
			if (!tmp) {
				pr_err("SELinux: ebitmap: out of memory\n");
				rc = -ENOMEM;
				goto bad;
			}
			/* round down */
			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
			if (n)
				n->next = tmp;
			else
				e->node = tmp;
			n = tmp;
		} else if (startbit <= n->startbit) {
			pr_err("SELinux: ebitmap: start bit %d"
			       " comes after start bit %d\n",
			       startbit, n->startbit);
			goto bad;
		}

		rc = next_entry(&mapbits, fp, sizeof(u64));
		if (rc < 0) {
			pr_err("SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		map = le64_to_cpu(mapbits);

		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
		while (map) {
			n->maps[index++] = map & (-1UL);
			map = EBITMAP_SHIFT_UNIT_SIZE(map);
		}
	}
ok:
	rc = 0;
out:
	return rc;
bad:
	if (!rc)
		rc = -EINVAL;
	ebitmap_destroy(e);
	goto out;
}
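
/*
 * ebitmap_write - write @e to the binary policy image @fp using the same
 * (map unit size, highbit, unit count) header plus (startbit, 64-bit map)
 * layout that ebitmap_read() parses.
 */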
int ebitmap_write(const struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n;
	u32 count;
	__le32 buf[3];
	u64 map;
	int bit, last_bit, last_startbit, rc;

	buf[0] = cpu_to_le32(BITS_PER_U64);

	count = 0;
	last_bit = 0;
	last_startbit = -1;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			count++;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		last_bit = roundup(bit + 1, BITS_PER_U64);
	}
	buf[1] = cpu_to_le32(last_bit);
	buf[2] = cpu_to_le32(count);

	rc = put_entry(buf, sizeof(u32), 3, fp);
	if (rc)
		return rc;

	map = 0;
	last_startbit = INT_MIN;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			__le64 buf64[1];

			/* this is the very first bit */
			if (!map) {
				last_startbit = rounddown(bit, BITS_PER_U64);
				map = (u64)1 << (bit - last_startbit);
				continue;
			}

			/* write the last node */
			buf[0] = cpu_to_le32(last_startbit);
			rc = put_entry(buf, sizeof(u32), 1, fp);
			if (rc)
				return rc;

			buf64[0] = cpu_to_le64(map);
			rc = put_entry(buf64, sizeof(u64), 1, fp);
			if (rc)
				return rc;

			/* set up for the next node */
			map = 0;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		map |= (u64)1 << (bit - last_startbit);
	}
	/* write the last node */
	if (map) {
		__le64 buf64[1];

		/* write the last node */
		buf[0] = cpu_to_le32(last_startbit);
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;

		buf64[0] = cpu_to_le64(map);
		rc = put_entry(buf64, sizeof(u64), 1, fp);
		if (rc)
			return rc;
	}
	return 0;
}
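
/*
 * ebitmap_hash - mix @e's highbit and each node's startbit and map words
 * into the running jhash value @hash.
 */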
u32 ebitmap_hash(const struct ebitmap *e, u32 hash)
{
	struct ebitmap_node *node;

	/* need to change hash even if ebitmap is empty */
	hash = jhash_1word(e->highbit, hash);
	for (node = e->node; node; node = node->next) {
		hash = jhash_1word(node->startbit, hash);
		hash = jhash(node->maps, sizeof(node->maps), hash);
	}
	return hash;
}
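
/* ebitmap_cache_init - create the slab cache used for ebitmap nodes. */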
void __init ebitmap_cache_init(void)
{
	ebitmap_node_cachep = kmem_cache_create("ebitmap_node",
						sizeof(struct ebitmap_node),
						0, SLAB_PANIC, NULL);
}