// SPDX-License-Identifier: GPL-2.0-only

#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/user_namespace.h>

struct ucounts init_ucounts = {
	.ns    = &init_user_ns,
	.uid   = GLOBAL_ROOT_UID,
	.count = ATOMIC_INIT(1),
};

#define UCOUNTS_HASHTABLE_BITS 10
static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
static DEFINE_SPINLOCK(ucounts_lock);

#define ucounts_hashfn(ns, uid)						\
	hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
		  UCOUNTS_HASHTABLE_BITS)
#define ucounts_hashentry(ns, uid)	\
	(ucounts_hashtable + ucounts_hashfn(ns, uid))

#ifdef CONFIG_SYSCTL
static struct ctl_table_set *
set_lookup(struct ctl_table_root *root)
{
	return &current_user_ns()->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &current_user_ns()->set == set;
}

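/*
 * Entries under /proc/sys/user/ are world readable, but writable only
 * by a task with CAP_SYS_RESOURCE over the owning user namespace; the
 * returned mode replicates the resulting permission bits to the user,
 * group, and other triads.
 */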
static int set_permissions(struct ctl_table_header *head,
			   struct ctl_table *table)
{
	struct user_namespace *user_ns =
		container_of(head->set, struct user_namespace, set);
	int mode;

	/* Allow users with CAP_SYS_RESOURCE unrestrained access */
	if (ns_capable(user_ns, CAP_SYS_RESOURCE))
		mode = (table->mode & S_IRWXU) >> 6;
	else
		/* Allow all others at most read-only access */
		mode = table->mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}

static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = set_permissions,
};

static long ue_zero = 0;
static long ue_int_max = INT_MAX;

#define UCOUNT_ENTRY(name)				\
	{						\
		.procname = name,			\
		.maxlen = sizeof(long),			\
		.mode = 0644,				\
		.proc_handler = proc_doulongvec_minmax,	\
		.extra1 = &ue_zero,			\
		.extra2 = &ue_int_max,			\
	}
static struct ctl_table user_table[] = {
	UCOUNT_ENTRY("max_user_namespaces"),
	UCOUNT_ENTRY("max_pid_namespaces"),
	UCOUNT_ENTRY("max_uts_namespaces"),
	UCOUNT_ENTRY("max_ipc_namespaces"),
	UCOUNT_ENTRY("max_net_namespaces"),
	UCOUNT_ENTRY("max_mnt_namespaces"),
	UCOUNT_ENTRY("max_cgroup_namespaces"),
	UCOUNT_ENTRY("max_time_namespaces"),
#ifdef CONFIG_INOTIFY_USER
	UCOUNT_ENTRY("max_inotify_instances"),
	UCOUNT_ENTRY("max_inotify_watches"),
#endif
#ifdef CONFIG_FANOTIFY
	UCOUNT_ENTRY("max_fanotify_groups"),
	UCOUNT_ENTRY("max_fanotify_marks"),
#endif
	/*
	 * Empty placeholders for the UCOUNT_RLIMIT_* counters, which
	 * have no sysctl knobs; they keep ARRAY_SIZE(user_table) equal
	 * to UCOUNT_COUNTS + 1 as checked in setup_userns_sysctls().
	 */
	{ },
	{ },
	{ },
	{ },
	{ }
};
#endif /* CONFIG_SYSCTL */

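/*
 * Register a per-user-namespace copy of user_table, pointing each
 * entry's data at the namespace's own ucount_max[] slot.  Returns
 * false, after retiring the sysctl set, if allocation or registration
 * fails.
 */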
bool setup_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	BUILD_BUG_ON(ARRAY_SIZE(user_table) != UCOUNT_COUNTS + 1);
	setup_sysctl_set(&ns->set, &set_root, set_is_seen);
	tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
	if (tbl) {
		int i;
		for (i = 0; i < UCOUNT_COUNTS; i++) {
			tbl[i].data = &ns->ucount_max[i];
		}
		ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl);
	}
	if (!ns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&ns->set);
		return false;
	}
#endif
	return true;
}

void retire_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	tbl = ns->sysctls->ctl_table_arg;
	unregister_sysctl_table(ns->sysctls);
	retire_sysctl_set(&ns->set);
	kfree(tbl);
#endif
}

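/* Look up the ucounts for (ns, uid); the caller must hold ucounts_lock. */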
static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid,
				    struct hlist_head *hashent)
{
	struct ucounts *ucounts;

	hlist_for_each_entry(ucounts, hashent, node) {
		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
			return ucounts;
	}
	return NULL;
}

static void hlist_add_ucounts(struct ucounts *ucounts)
{
	struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);

	spin_lock_irq(&ucounts_lock);
	hlist_add_head(&ucounts->node, hashent);
	spin_unlock_irq(&ucounts_lock);
}

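/*
 * The reference count is taken with atomic_add_negative() so that a
 * wrap into the negative range is detected rather than silently
 * overflowing; get_ucounts() turns a wrap into a NULL return.
 */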
static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
{
	/* Returns true on a successful get, false if the count wraps. */
	return !atomic_add_negative(1, &ucounts->count);
}

struct ucounts *get_ucounts(struct ucounts *ucounts)
{
	if (!get_ucounts_or_wrap(ucounts)) {
		put_ucounts(ucounts);
		ucounts = NULL;
	}
	return ucounts;
}

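/*
 * Find the ucounts for (ns, uid) and take a reference, allocating a
 * new entry if none exists.  The allocation happens with ucounts_lock
 * dropped, so the hash chain is rechecked before inserting; a racing
 * allocation wins and the duplicate is freed.
 */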
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
	struct ucounts *ucounts, *new;
	bool wrapped;

	spin_lock_irq(&ucounts_lock);
	ucounts = find_ucounts(ns, uid, hashent);
	if (!ucounts) {
		spin_unlock_irq(&ucounts_lock);

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		new->ns = ns;
		new->uid = uid;
		atomic_set(&new->count, 1);

		spin_lock_irq(&ucounts_lock);
		ucounts = find_ucounts(ns, uid, hashent);
		if (ucounts) {
			kfree(new);
		} else {
			hlist_add_head(&new->node, hashent);
			spin_unlock_irq(&ucounts_lock);
			return new;
		}
	}
	wrapped = !get_ucounts_or_wrap(ucounts);
	spin_unlock_irq(&ucounts_lock);
	if (wrapped) {
		put_ucounts(ucounts);
		return NULL;
	}
	return ucounts;
}

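/*
 * Drop a reference; the final put unhashes and frees the entry.  Using
 * atomic_dec_and_lock_irqsave() means the count can only reach zero
 * while the hash chain is protected by ucounts_lock.
 */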
void put_ucounts(struct ucounts *ucounts)
{
	unsigned long flags;

	if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
		hlist_del_init(&ucounts->node);
		spin_unlock_irqrestore(&ucounts_lock, flags);
		kfree(ucounts);
	}
}

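/*
 * Increment v, but only if the result stays below u.  A cmpxchg loop
 * is used so that concurrent increments cannot race past the limit.
 */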
static inline bool atomic_long_inc_below(atomic_long_t *v, long u)
{
	long c, old;

	c = atomic_long_read(v);
	for (;;) {
		if (unlikely(c >= u))
			return false;
		old = atomic_long_cmpxchg(v, c, c + 1);
		if (likely(old == c))
			return true;
		c = old;
	}
}

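/*
 * Charge one object of @type against (ns, uid) and every ancestor
 * namespace, failing if any level is at its limit; increments already
 * taken are rolled back on failure.  A typical caller pairs it with
 * dec_ucount(), roughly (sketch):
 *
 *	ucounts = inc_ucount(ns, current_euid(), UCOUNT_USER_NAMESPACES);
 *	if (!ucounts)
 *		return -ENOSPC;
 *	...
 *	dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
 */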
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
			   enum ucount_type type)
{
	struct ucounts *ucounts, *iter, *bad;
	struct user_namespace *tns;

	ucounts = alloc_ucounts(ns, uid);
	for (iter = ucounts; iter; iter = tns->ucounts) {
		long max;
		tns = iter->ns;
		max = READ_ONCE(tns->ucount_max[type]);
		if (!atomic_long_inc_below(&iter->ucount[type], max))
			goto fail;
	}
	return ucounts;
fail:
	bad = iter;
	for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
		atomic_long_dec(&iter->ucount[type]);

	put_ucounts(ucounts);
	return NULL;
}

void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
{
	struct ucounts *iter;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long dec = atomic_long_dec_if_positive(&iter->ucount[type]);
		WARN_ON_ONCE(dec < 0);
	}
	put_ucounts(ucounts);
}

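/*
 * Add @v to the rlimit count of @type at every level of the hierarchy.
 * Returns the new count at the @ucounts level, or LONG_MAX if any
 * level overflowed or exceeded its configured maximum; callers compare
 * the result against their limit and back out with
 * dec_rlimit_ucounts() on failure.
 */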
long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
{
	struct ucounts *iter;
	long ret = 0;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long max = READ_ONCE(iter->ns->ucount_max[type]);
		long new = atomic_long_add_return(v, &iter->ucount[type]);
		if (new < 0 || new > max)
			ret = LONG_MAX;
		else if (iter == ucounts)
			ret = new;
	}
	return ret;
}

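/*
 * Subtract @v at every level; returns true if the count at the
 * @ucounts level dropped to zero.
 */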
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
{
	struct ucounts *iter;
	long new = -1; /* Silence compiler warning */

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long dec = atomic_long_sub_return(v, &iter->ucount[type]);
		WARN_ON_ONCE(dec < 0);
		if (iter == ucounts)
			new = dec;
	}
	return (new == 0);
}

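/*
 * Walk from @ucounts up to (but not including) @last, undoing one
 * rlimit charge per level and dropping the reference that
 * inc_rlimit_get_ucounts() took when a level's count first left zero.
 */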
static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
				      struct ucounts *last, enum ucount_type type)
{
	struct ucounts *iter, *next;

	for (iter = ucounts; iter != last; iter = next) {
		long dec = atomic_long_sub_return(1, &iter->ucount[type]);
		WARN_ON_ONCE(dec < 0);
		next = iter->ns->ucounts;
		if (dec == 0)
			put_ucounts(iter);
	}
}

void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type)
{
	do_dec_rlimit_put_ucounts(ucounts, NULL, type);
}

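/*
 * Charge one rlimit object of @type at every level, pinning each level
 * with a ucounts reference the first time its count leaves zero.
 * Returns the new count at the @ucounts level, or 0 on failure after
 * unwinding.  Pairs with dec_rlimit_put_ucounts(), along the lines of
 * (sketch):
 *
 *	if (!inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING))
 *		return NULL;
 *	...
 *	dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
 */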
long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
{
	/* Caller must hold a reference to ucounts */
	struct ucounts *iter;
	long dec, ret = 0;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long max = READ_ONCE(iter->ns->ucount_max[type]);
		long new = atomic_long_add_return(1, &iter->ucount[type]);
		if (new < 0 || new > max)
			goto unwind;
		if (iter == ucounts)
			ret = new;
		/*
		 * Grab an extra ucount reference for the caller when
		 * the rlimit count was previously 0.
		 */
		if (new != 1)
			continue;
		if (!get_ucounts(iter))
			goto dec_unwind;
	}
	return ret;
dec_unwind:
	dec = atomic_long_sub_return(1, &iter->ucount[type]);
	WARN_ON_ONCE(dec < 0);
unwind:
	do_dec_rlimit_put_ucounts(ucounts, iter, type);
	return 0;
}

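/*
 * Check @ucounts against the explicit @max at its own level, then
 * against each namespace's configured maximum up the hierarchy.
 */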
bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
{
	struct ucounts *iter;

	if (get_ucounts_value(ucounts, type) > max)
		return true;
	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		max = READ_ONCE(iter->ns->ucount_max[type]);
		if (get_ucounts_value(iter, type) > max)
			return true;
	}
	return false;
}

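/*
 * Register the top-level "user" sysctl directory in the default set,
 * hash the initial ucounts, and charge init's first process against
 * UCOUNT_RLIMIT_NPROC.
 */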
static __init int user_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	static struct ctl_table_header *user_header;
	static struct ctl_table empty[1];
	/*
	 * It is necessary to register the user directory in the
	 * default set so that registrations in the child sets work
	 * properly.
	 */
	user_header = register_sysctl("user", empty);
	kmemleak_ignore(user_header);
	BUG_ON(!user_header);
	BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
	hlist_add_ucounts(&init_ucounts);
	inc_rlimit_ucounts(&init_ucounts, UCOUNT_RLIMIT_NPROC, 1);
	return 0;
}
subsys_initcall(user_namespace_sysctl_init);