ruleset.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <[email protected]>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "limits.h"
#include "object.h"
#include "ruleset.h"
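
/*
 * Allocates a zeroed ruleset with room for @num_layers entries in its
 * fs_access_masks flexible array, and initializes its reference count,
 * mutex and (empty) rule tree.
 */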
static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset =
		kzalloc(struct_size(new_ruleset, fs_access_masks, num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root = RB_ROOT;
	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * fs_access_masks[] = 0
	 */
	return new_ruleset;
}
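
/*
 * Creates a ruleset with a single layer that will hold @fs_access_mask.
 * An empty access mask is rejected as it would make the ruleset useless.
 */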
struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about useless ruleset. */
	if (!fs_access_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (!IS_ERR(new_ruleset))
		new_ruleset->fs_access_masks[0] = fs_access_mask;
	return new_ruleset;
}
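
/*
 * Compile-time check that the num_layers field of struct landlock_rule is
 * wide enough to hold LANDLOCK_MAX_NUM_LAYERS.
 */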
static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}
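
/*
 * Allocates a new rule tied to @object, copying the @num_layers entries of
 * @layers and, if @new_layer is non-NULL, appending it as the topmost layer.
 * Takes a new reference on @object for the returned rule; the caller must
 * already hold the underlying object.
 */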
static struct landlock_rule *
create_rule(struct landlock_object *const object,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	landlock_get_object(object);
	new_rule->object = object;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}
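
/* Drops the rule's reference on its object, then frees the rule itself. */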
static void free_rule(struct landlock_rule *const rule)
{
	might_sleep();
	if (!rule)
		return;
	landlock_put_object(rule->object);
	kfree(rule);
}
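
/*
 * Compile-time checks that the ruleset counters and the per-layer access
 * masks are wide enough for the limits defined in limits.h.
 */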
static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};
	typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @object: The object to build the new rule with.  The underlying kernel
 *          object must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       struct landlock_object *const object,
		       const struct landlock_layer (*const layers)[],
		       size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!object || !layers))
		return -ENOENT;
	walker_node = &(ruleset->root.rb_node);
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->object != object) {
			parent_node = *walker_node;
			if (this->object < object)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(object, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
		free_rule(this);
		return 0;
	}

	/* There is no match for @object. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(object, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, &ruleset->root);
	ruleset->num_rules++;
	return 0;
}
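
/*
 * Compile-time check that a layer's level and access fields can hold
 * LANDLOCK_MAX_NUM_LAYERS and LANDLOCK_MASK_ACCESS_FS.
 */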
static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 struct landlock_object *const object,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
}
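
/*
 * A struct landlock_hierarchy links a domain to its parent domain.
 * put_hierarchy() walks up the chain and frees every node whose last
 * reference was just dropped.
 */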
static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
{
	if (hierarchy)
		refcount_inc(&hierarchy->usage);
}

static void put_hierarchy(struct landlock_hierarchy *hierarchy)
{
	while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
		const struct landlock_hierarchy *const freeme = hierarchy;

		hierarchy = hierarchy->parent;
		kfree(freeme);
	}
}
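
/*
 * Stacks the single layer of @src on top of @dst: copies its access mask
 * into the last fs_access_masks slot and re-inserts each of its rules with
 * the level set to @dst->num_layers.
 */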
static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };

		if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
			err = -EINVAL;
			goto out_unlock;
		}
		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
			err = -EINVAL;
			goto out_unlock;
		}
		layers[0].access = walker_rule->layers[0].access;
		err = insert_rule(dst, walker_rule->object, &layers,
				  ARRAY_SIZE(layers));
		if (err)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}
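
/*
 * Copies @parent into @child: duplicates every rule with its full layer
 * stack, copies the parent's access masks (the last slot is left free for
 * the layer to be merged on top), and links @child to the parent hierarchy.
 */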
static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     &parent->root, node) {
		err = insert_rule(child, walker_rule->object,
				  &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			goto out_unlock;
	}

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->fs_access_masks, parent->fs_access_masks,
	       flex_array_size(parent, fs_access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}
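
/*
 * Frees every rule (dropping their object references), the hierarchy and
 * the ruleset itself.  Only called once the last reference to @ruleset has
 * been dropped.
 */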
static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node)
		free_rule(freeme);
	put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}
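
/*
 * Unlike landlock_put_ruleset(), which may sleep, the deferred variant below
 * queues the final free on the system workqueue, so it can be called from
 * contexts where sleeping is not allowed.
 */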
static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;
	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy) {
		err = -ENOMEM;
		goto out_put_dom;
	}
	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		goto out_put_dom;

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		goto out_put_dom;

	return new_dom;

out_put_dom:
	landlock_put_ruleset(new_dom);
	return ERR_PTR(err);
}

/*
 * The returned access has the same lifetime as @ruleset.
 */
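/* Performs a binary search in the rule tree, keyed by object address. */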
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_object *const object)
{
	const struct rb_node *node;

	if (!object)
		return NULL;
	node = ruleset->root.rb_node;
	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->object == object)
			return this;
		if (this->object < object)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}