ucount.c

// SPDX-License-Identifier: GPL-2.0-only

#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/user_namespace.h>

struct ucounts init_ucounts = {
	.ns    = &init_user_ns,
	.uid   = GLOBAL_ROOT_UID,
	.count = ATOMIC_INIT(1),
};

#define UCOUNTS_HASHTABLE_BITS 10
static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
static DEFINE_SPINLOCK(ucounts_lock);

#define ucounts_hashfn(ns, uid)						\
	hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
		  UCOUNTS_HASHTABLE_BITS)
#define ucounts_hashentry(ns, uid)	\
	(ucounts_hashtable + ucounts_hashfn(ns, uid))

#ifdef CONFIG_SYSCTL
static struct ctl_table_set *
set_lookup(struct ctl_table_root *root)
{
	return &current_user_ns()->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &current_user_ns()->set == set;
}

static int set_permissions(struct ctl_table_header *head,
			   struct ctl_table *table)
{
	struct user_namespace *user_ns =
		container_of(head->set, struct user_namespace, set);
	int mode;

	/* Allow users with CAP_SYS_RESOURCE unrestrained access */
	if (ns_capable(user_ns, CAP_SYS_RESOURCE))
		mode = (table->mode & S_IRWXU) >> 6;
	else
		/* Allow all others at most read-only access */
		mode = table->mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}

static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = set_permissions,
};

static long ue_zero = 0;
static long ue_int_max = INT_MAX;

#define UCOUNT_ENTRY(name)					\
	{							\
		.procname	= name,				\
		.maxlen		= sizeof(long),			\
		.mode		= 0644,				\
		.proc_handler	= proc_doulongvec_minmax,	\
		.extra1		= &ue_zero,			\
		.extra2		= &ue_int_max,			\
	}

static struct ctl_table user_table[] = {
	UCOUNT_ENTRY("max_user_namespaces"),
	UCOUNT_ENTRY("max_pid_namespaces"),
	UCOUNT_ENTRY("max_uts_namespaces"),
	UCOUNT_ENTRY("max_ipc_namespaces"),
	UCOUNT_ENTRY("max_net_namespaces"),
	UCOUNT_ENTRY("max_mnt_namespaces"),
	UCOUNT_ENTRY("max_cgroup_namespaces"),
	UCOUNT_ENTRY("max_time_namespaces"),
#ifdef CONFIG_INOTIFY_USER
	UCOUNT_ENTRY("max_inotify_instances"),
	UCOUNT_ENTRY("max_inotify_watches"),
#endif
#ifdef CONFIG_FANOTIFY
	UCOUNT_ENTRY("max_fanotify_groups"),
	UCOUNT_ENTRY("max_fanotify_marks"),
#endif
	{ }
};
#endif /* CONFIG_SYSCTL */

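/*
 * Register the per-namespace "user" sysctl directory and point each
 * table entry at the namespace's ucount_max[] limits. Returns false if
 * the table could not be duplicated or registered.
 */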
bool setup_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	BUILD_BUG_ON(ARRAY_SIZE(user_table) != UCOUNT_COUNTS + 1);
	setup_sysctl_set(&ns->set, &set_root, set_is_seen);
	tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
	if (tbl) {
		int i;
		for (i = 0; i < UCOUNT_COUNTS; i++) {
			tbl[i].data = &ns->ucount_max[i];
		}
		ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl);
	}
	if (!ns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&ns->set);
		return false;
	}
#endif
	return true;
}

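/*
 * Tear down what setup_userns_sysctls() registered: unregister the
 * table, retire the sysctl set and free the duplicated table.
 */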
void retire_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	tbl = ns->sysctls->ctl_table_arg;
	unregister_sysctl_table(ns->sysctls);
	retire_sysctl_set(&ns->set);
	kfree(tbl);
#endif
}

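/*
 * Look up the ucounts entry for (ns, uid) on @hashent. The caller must
 * hold ucounts_lock.
 */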
static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
{
	struct ucounts *ucounts;

	hlist_for_each_entry(ucounts, hashent, node) {
		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
			return ucounts;
	}
	return NULL;
}

static void hlist_add_ucounts(struct ucounts *ucounts)
{
	struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);

	spin_lock_irq(&ucounts_lock);
	hlist_add_head(&ucounts->node, hashent);
	spin_unlock_irq(&ucounts_lock);
}

static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
{
	/* Returns true on a successful get, false if the count wraps. */
	return !atomic_add_negative(1, &ucounts->count);
}

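/*
 * Take a reference on @ucounts. Returns @ucounts on success, or NULL
 * (after undoing the increment) if the reference count wrapped.
 */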
struct ucounts *get_ucounts(struct ucounts *ucounts)
{
	if (!get_ucounts_or_wrap(ucounts)) {
		put_ucounts(ucounts);
		ucounts = NULL;
	}
	return ucounts;
}

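/*
 * Find the ucounts entry for (ns, uid), creating it if it does not
 * exist yet, and take a reference on it. Returns NULL if the
 * allocation fails or the reference count wrapped.
 */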
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
	struct ucounts *ucounts, *new;
	bool wrapped;

	spin_lock_irq(&ucounts_lock);
	ucounts = find_ucounts(ns, uid, hashent);
	if (!ucounts) {
		spin_unlock_irq(&ucounts_lock);

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		new->ns = ns;
		new->uid = uid;
		atomic_set(&new->count, 1);

		spin_lock_irq(&ucounts_lock);
		ucounts = find_ucounts(ns, uid, hashent);
		if (ucounts) {
			kfree(new);
		} else {
			hlist_add_head(&new->node, hashent);
			get_user_ns(new->ns);
			spin_unlock_irq(&ucounts_lock);
			return new;
		}
	}
	wrapped = !get_ucounts_or_wrap(ucounts);
	spin_unlock_irq(&ucounts_lock);
	if (wrapped) {
		put_ucounts(ucounts);
		return NULL;
	}
	return ucounts;
}

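/*
 * Drop a reference on @ucounts. The final put unhashes the entry,
 * releases the pinned user namespace and frees it.
 */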
void put_ucounts(struct ucounts *ucounts)
{
	unsigned long flags;

	if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
		hlist_del_init(&ucounts->node);
		spin_unlock_irqrestore(&ucounts_lock, flags);
		put_user_ns(ucounts->ns);
		kfree(ucounts);
	}
}

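/*
 * Atomically increment *v provided it is still below the limit @u;
 * returns false, leaving *v unchanged, once the limit has been reached.
 */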
static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
{
	long c, old;

	c = atomic_long_read(v);
	for (;;) {
		if (unlikely(c >= u))
			return false;
		old = atomic_long_cmpxchg(v, c, c+1);
		if (likely(old == c))
			return true;
		c = old;
	}
}

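/*
 * Charge one @type object against (ns, uid) and against every ancestor
 * user namespace. If any level is already at its ucount_max[] limit,
 * the charges taken so far are rolled back and NULL is returned.
 *
 * A typical caller pattern (an illustrative sketch, not code from this
 * file) looks like:
 *
 *	ucounts = inc_ucount(ns, current_euid(), UCOUNT_PID_NAMESPACES);
 *	if (!ucounts)
 *		return ERR_PTR(-ENOSPC);
 *	...
 *	dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
 */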
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
			   enum ucount_type type)
{
	struct ucounts *ucounts, *iter, *bad;
	struct user_namespace *tns;

	ucounts = alloc_ucounts(ns, uid);
	for (iter = ucounts; iter; iter = tns->ucounts) {
		long max;
		tns = iter->ns;
		max = READ_ONCE(tns->ucount_max[type]);
		if (!atomic_long_inc_below(&iter->ucount[type], max))
			goto fail;
	}
	return ucounts;
fail:
	bad = iter;
	for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
		atomic_long_dec(&iter->ucount[type]);

	put_ucounts(ucounts);
	return NULL;
}

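/*
 * Undo inc_ucount(): uncharge @type at every level and drop the
 * reference that inc_ucount() took.
 */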
void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
{
	struct ucounts *iter;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long dec = atomic_long_dec_if_positive(&iter->ucount[type]);
		WARN_ON_ONCE(dec < 0);
	}
	put_ucounts(ucounts);
}

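/*
 * Add @v to the rlimit counter of @type in this entry and in every
 * ancestor namespace. Returns the new value at the bottom level, or
 * LONG_MAX if any level overflowed or went over the applicable limit.
 */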
long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v)
{
	struct ucounts *iter;
	long max = LONG_MAX;
	long ret = 0;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long new = atomic_long_add_return(v, &iter->rlimit[type]);
		if (new < 0 || new > max)
			ret = LONG_MAX;
		else if (iter == ucounts)
			ret = new;
		max = get_userns_rlimit_max(iter->ns, type);
	}
	return ret;
}

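/*
 * Subtract @v from the rlimit counter of @type at every level. Returns
 * true if the counter in @ucounts itself dropped to zero.
 */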
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v)
{
	struct ucounts *iter;
	long new = -1; /* Silence compiler warning */

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long dec = atomic_long_sub_return(v, &iter->rlimit[type]);
		WARN_ON_ONCE(dec < 0);
		if (iter == ucounts)
			new = dec;
	}
	return (new == 0);
}

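/*
 * Drop one rlimit charge of @type at each level from @ucounts up to
 * (but not including) @last, releasing a level's reference whenever
 * its counter drops back to zero.
 */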
static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
				      struct ucounts *last, enum rlimit_type type)
{
	struct ucounts *iter, *next;

	for (iter = ucounts; iter != last; iter = next) {
		long dec = atomic_long_sub_return(1, &iter->rlimit[type]);
		WARN_ON_ONCE(dec < 0);
		next = iter->ns->ucounts;
		if (dec == 0)
			put_ucounts(iter);
	}
}

void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type)
{
	do_dec_rlimit_put_ucounts(ucounts, NULL, type);
}

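/*
 * Charge one rlimit unit of @type at every level, pinning each level
 * whose counter went from zero to one. On overlimit or reference
 * failure, all charges and references taken so far are rolled back and
 * 0 is returned.
 */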
long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type)
{
	/* Caller must hold a reference to ucounts */
	struct ucounts *iter;
	long max = LONG_MAX;
	long dec, ret = 0;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long new = atomic_long_add_return(1, &iter->rlimit[type]);
		if (new < 0 || new > max)
			goto unwind;
		if (iter == ucounts)
			ret = new;
		max = get_userns_rlimit_max(iter->ns, type);
		/*
		 * Grab an extra ucount reference for the caller when
		 * the rlimit count was previously 0.
		 */
		if (new != 1)
			continue;
		if (!get_ucounts(iter))
			goto dec_unwind;
	}
	return ret;
dec_unwind:
	dec = atomic_long_sub_return(1, &iter->rlimit[type]);
	WARN_ON_ONCE(dec < 0);
unwind:
	do_dec_rlimit_put_ucounts(ucounts, iter, type);
	return 0;
}

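/*
 * Check whether the rlimit counter of @type exceeds @rlimit at this
 * level, or the applicable per-namespace limit at any ancestor level.
 */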
bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long rlimit)
{
	struct ucounts *iter;
	long max = rlimit;

	if (rlimit > LONG_MAX)
		max = LONG_MAX;
	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long val = get_rlimit_value(iter, type);
		if (val < 0 || val > max)
			return true;
		max = get_userns_rlimit_max(iter->ns, type);
	}
	return false;
}

static __init int user_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	static struct ctl_table_header *user_header;
	static struct ctl_table empty[1];
	/*
	 * It is necessary to register the user directory in the
	 * default set so that registrations in the child sets work
	 * properly.
	 */
	user_header = register_sysctl("user", empty);
	kmemleak_ignore(user_header);
	BUG_ON(!user_header);
	BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
	hlist_add_ucounts(&init_ucounts);
	inc_rlimit_ucounts(&init_ucounts, UCOUNT_RLIMIT_NPROC, 1);
	return 0;
}
subsys_initcall(user_namespace_sysctl_init);