// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
#define pr_fmt(fmt) "seccomp: " fmt

#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/capability.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/lockdep.h>

/*
 * When SECCOMP_IOCTL_NOTIF_ID_VALID was first introduced, it had the
 * wrong direction flag in the ioctl number. This is the broken one,
 * which the kernel needs to keep supporting until all userspaces stop
 * using the wrong command number.
 */
#define SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR	SECCOMP_IOR(2, __u64)

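/*
 * Illustrative note (not in the original source): the "wrong" number above
 * was built with the read direction (SECCOMP_IOR), while the corrected
 * SECCOMP_IOCTL_NOTIF_ID_VALID in <uapi/linux/seccomp.h> uses SECCOMP_IOW,
 * since the cookie is copied *into* the kernel. A userspace sketch probing
 * both, assuming a notifier fd in "listener" and a cookie in "id":
 *
 *	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &id) == 0 ||
 *	    ioctl(listener, SECCOMP_IOR(2, __u64), &id) == 0)
 *		;	/* the cookie still names a live notification */
 */
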
enum notify_state {
	SECCOMP_NOTIFY_INIT,
	SECCOMP_NOTIFY_SENT,
	SECCOMP_NOTIFY_REPLIED,
};

struct seccomp_knotif {
	/* The struct pid of the task whose filter triggered the notification */
	struct task_struct *task;

	/* The "cookie" for this request; this is unique for this filter. */
	u64 id;

	/*
	 * The seccomp data. This pointer is valid the entire time this
	 * notification is active, since it comes from __seccomp_filter which
	 * eclipses the entire lifecycle here.
	 */
	const struct seccomp_data *data;

	/*
	 * Notification states. When SECCOMP_RET_USER_NOTIF is returned, a
	 * struct seccomp_knotif is created and starts out in INIT. Once the
	 * handler reads the notification off of an FD, it transitions to SENT.
	 * If a signal is received the state transitions back to INIT and
	 * another message is sent. When the userspace handler replies, state
	 * transitions to REPLIED.
	 */
	enum notify_state state;

	/* The return values, only valid when in SECCOMP_NOTIFY_REPLIED */
	int error;
	long val;
	u32 flags;

	/*
	 * Signals when this has changed states, such as the listener
	 * dying, a new seccomp addfd message, or changing to REPLIED
	 */
	struct completion ready;

	struct list_head list;

	/* outstanding addfd requests */
	struct list_head addfd;
};

/**
 * struct seccomp_kaddfd - container for seccomp_addfd ioctl messages
 *
 * @file: A reference to the file to install in the other task
 * @fd: The fd number to install it at. If the fd number is -1, it means the
 *      installing process should allocate the fd as normal.
 * @flags: The flags for the new file descriptor. At the moment, only O_CLOEXEC
 *         is allowed.
 * @ret: The return value of the installing process. It is set to the fd num
 *       upon success (>= 0).
 * @completion: Indicates that the installing process has completed fd
 *              installation, or gone away (either due to successful
 *              reply, or signal)
 *
 */
struct seccomp_kaddfd {
	struct file *file;
	int fd;
	unsigned int flags;

	/* To only be set on reply */
	int ret;
	struct completion completion;
	struct list_head list;
};

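/*
 * Illustrative supervisor-side sketch (not part of this file; field names
 * come from <uapi/linux/seccomp.h>, while "listener", "req" and "my_fd" are
 * hypothetical variables) of the request that becomes a seccomp_kaddfd:
 *
 *	struct seccomp_notif_addfd addfd = {
 *		.id = req.id,			// cookie of the blocked syscall
 *		.srcfd = my_fd,			// fd owned by the supervisor
 *		.newfd_flags = O_CLOEXEC,	// becomes seccomp_kaddfd.flags
 *	};
 *	int fd_in_target = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
 *
 * Without SECCOMP_ADDFD_FLAG_SETFD the request reaches this code with
 * @fd == -1, i.e. "allocate the fd as normal" per the kernel-doc above.
 */
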
128/**
129 * struct notification - container for seccomp userspace notifications. Since
130 * most seccomp filters will not have notification listeners attached and this
131 * structure is fairly large, we store the notification-specific stuff in a
132 * separate structure.
133 *
134 * @request: A semaphore that users of this notification can wait on for
135 * changes. Actual reads and writes are still controlled with
136 * filter->notify_lock.
137 * @next_id: The id of the next request.
138 * @notifications: A list of struct seccomp_knotif elements.
Tycho Andersen6a21cc52018-12-09 11:24:13 -0700139 */
140struct notification {
141 struct semaphore request;
142 u64 next_id;
143 struct list_head notifications;
Tycho Andersen6a21cc52018-12-09 11:24:13 -0700144};
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500145
YiFei Zhuf9d480b2020-10-11 10:47:42 -0500146#ifdef SECCOMP_ARCH_NATIVE
147/**
148 * struct action_cache - per-filter cache of seccomp actions per
149 * arch/syscall pair
150 *
151 * @allow_native: A bitmap where each bit represents whether the
152 * filter will always allow the syscall, for the
153 * native architecture.
154 * @allow_compat: A bitmap where each bit represents whether the
155 * filter will always allow the syscall, for the
156 * compat architecture.
157 */
158struct action_cache {
159 DECLARE_BITMAP(allow_native, SECCOMP_ARCH_NATIVE_NR);
160#ifdef SECCOMP_ARCH_COMPAT
161 DECLARE_BITMAP(allow_compat, SECCOMP_ARCH_COMPAT_NR);
162#endif
163};
164#else
165struct action_cache { };
166
167static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
168 const struct seccomp_data *sd)
169{
170 return false;
171}
YiFei Zhu8e01b512020-10-11 10:47:43 -0500172
173static inline void seccomp_cache_prepare(struct seccomp_filter *sfilter)
174{
175}
YiFei Zhuf9d480b2020-10-11 10:47:42 -0500176#endif /* SECCOMP_ARCH_NATIVE */
177
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500178/**
179 * struct seccomp_filter - container for seccomp BPF programs
180 *
Christian Braunerb707dde2020-05-31 13:50:28 +0200181 * @refs: Reference count to manage the object lifetime.
182 * A filter's reference count is incremented for each directly
183 * attached task, once for the dependent filter, and if
184 * requested for the user notifier. When @refs reaches zero,
185 * the filter can be freed.
Christian Brauner99cdb8b2020-05-31 13:50:30 +0200186 * @users: A filter's @users count is incremented for each directly
187 * attached task (filter installation, fork(), thread_sync),
188 * and once for the dependent filter (tracked in filter->prev).
189 * When it reaches zero it indicates that no direct or indirect
190 * users of that filter exist. No new tasks can get associated with
191 * this filter after reaching 0. The @users count is always smaller
192 * or equal to @refs. Hence, reaching 0 for @users does not mean
193 * the filter can be freed.
YiFei Zhu8e01b512020-10-11 10:47:43 -0500194 * @cache: cache of arch/syscall mappings to actions
Tyler Hickse66a3992017-08-11 04:33:56 +0000195 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500196 * @prev: points to a previously installed, or inherited, filter
Mickaël Salaün285fdfc2016-09-20 19:39:47 +0200197 * @prog: the BPF program to evaluate
Tycho Andersen6a21cc52018-12-09 11:24:13 -0700198 * @notif: the struct that holds all notification related information
199 * @notify_lock: A lock for all notification-related accesses.
Christian Brauner76194c42020-06-01 11:50:07 -0700200 * @wqh: A wait queue for poll if a notifier is in use.
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500201 *
202 * seccomp_filter objects are organized in a tree linked via the @prev
203 * pointer. For any task, it appears to be a singly-linked list starting
204 * with current->seccomp.filter, the most recently attached or inherited filter.
205 * However, multiple filters may share a @prev node, by way of fork(), which
206 * results in a unidirectional tree existing in memory. This is similar to
207 * how namespaces work.
208 *
209 * seccomp_filter objects should never be modified after being attached
Christian Braunerb707dde2020-05-31 13:50:28 +0200210 * to a task_struct (other than @refs).
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500211 */
212struct seccomp_filter {
Christian Braunerb707dde2020-05-31 13:50:28 +0200213 refcount_t refs;
Christian Brauner99cdb8b2020-05-31 13:50:30 +0200214 refcount_t users;
Tyler Hickse66a3992017-08-11 04:33:56 +0000215 bool log;
YiFei Zhu8e01b512020-10-11 10:47:43 -0500216 struct action_cache cache;
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500217 struct seccomp_filter *prev;
Alexei Starovoitov7ae457c2014-07-30 20:34:16 -0700218 struct bpf_prog *prog;
Tycho Andersen6a21cc52018-12-09 11:24:13 -0700219 struct notification *notif;
220 struct mutex notify_lock;
Christian Brauner76194c42020-06-01 11:50:07 -0700221 wait_queue_head_t wqh;
Will Drewrye2cfabdf2012-04-12 16:47:57 -0500222};
223
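/*
 * Illustrative sketch (not in the original source) of the @prev tree the
 * kernel-doc above describes: a parent attaches F1 and forks, then each
 * child attaches its own filter on top, sharing F1 as a @prev node:
 *
 *	child A: seccomp.filter -> F2 --+
 *	                                +--> F1 --> NULL
 *	child B: seccomp.filter -> F3 --+
 *
 * Walking ->prev from either task looks like a singly-linked list, but in
 * memory the whole structure is a tree rooted at NULL.
 */
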
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

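/*
 * Worked example (illustrative, not in the original source): a classic BPF
 * struct sock_filter is 8 bytes, so the limit above evaluates to
 * (1 << 18) / 8 = 32768 instructions along any path of stacked filters,
 * where each stacked filter also pays a 4-instruction penalty (see
 * seccomp_attach_filter() below).
 */
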
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	/*
	 * Instead of using current_pt_regs(), we're already doing the work
	 * to safely fetch "current", so just use "task" everywhere below.
	 */
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch(task);
	syscall_get_arguments(task, regs, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}

/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load. It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

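/*
 * Illustrative sketch (not in the original source): the smallest program
 * that survives the checks above is a single unconditional return, e.g.
 * an allow-everything filter built in userspace:
 *
 *	struct sock_filter insn = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
 *	struct sock_fprog prog = { .len = 1, .filter = &insn };
 *
 * Loads from the program's "packet" are rewritten above to read from
 * struct seccomp_data instead, and must be 4-byte aligned within it.
 */
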
#ifdef SECCOMP_ARCH_NATIVE
static inline bool seccomp_cache_check_allow_bitmap(const void *bitmap,
						    size_t bitmap_size,
						    int syscall_nr)
{
	if (unlikely(syscall_nr < 0 || syscall_nr >= bitmap_size))
		return false;
	syscall_nr = array_index_nospec(syscall_nr, bitmap_size);

	return test_bit(syscall_nr, bitmap);
}

/**
 * seccomp_cache_check_allow - lookup seccomp cache
 * @sfilter: The seccomp filter
 * @sd: The seccomp data to lookup the cache with
 *
 * Returns true if the seccomp_data is cached and allowed.
 */
static inline bool seccomp_cache_check_allow(const struct seccomp_filter *sfilter,
					     const struct seccomp_data *sd)
{
	int syscall_nr = sd->nr;
	const struct action_cache *cache = &sfilter->cache;

#ifndef SECCOMP_ARCH_COMPAT
	/* A native-only architecture doesn't need to check sd->arch. */
	return seccomp_cache_check_allow_bitmap(cache->allow_native,
						SECCOMP_ARCH_NATIVE_NR,
						syscall_nr);
#else
	if (likely(sd->arch == SECCOMP_ARCH_NATIVE))
		return seccomp_cache_check_allow_bitmap(cache->allow_native,
							SECCOMP_ARCH_NATIVE_NR,
							syscall_nr);
	if (likely(sd->arch == SECCOMP_ARCH_COMPAT))
		return seccomp_cache_check_allow_bitmap(cache->allow_compat,
							SECCOMP_ARCH_COMPAT_NR,
							syscall_nr);
#endif /* SECCOMP_ARCH_COMPAT */

	WARN_ON_ONCE(true);
	return false;
}
#endif /* SECCOMP_ARCH_NATIVE */

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			READ_ONCE(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (WARN_ON(f == NULL))
		return SECCOMP_RET_KILL_PROCESS;

	if (seccomp_cache_check_allow(f, sd))
		return SECCOMP_RET_ALLOW;

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = bpf_prog_run_pin_on_cpu(f->prog, sd);

		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
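
/*
 * Illustrative note (not in the original source): "lowest value wins" above
 * compares the action field as a signed 32-bit value, so precedence runs
 * KILL_PROCESS < KILL_THREAD < TRAP < ERRNO < USER_NOTIF < TRACE < LOG <
 * ALLOW. E.g. with two stacked filters returning SECCOMP_RET_ERRNO and
 * SECCOMP_RET_TRACE for the same syscall, ACTION_ONLY(SECCOMP_RET_ERRNO)
 * is smaller, so the ERRNO action and its filter are what
 * seccomp_run_filters() reports back.
 */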
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }

static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode,
				       unsigned long flags)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure SYSCALL_WORK_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	/* Assume default seccomp processes want spec flaw mitigation. */
	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
		arch_seccomp_spec_mitigate(task);
	set_task_syscall_work(task, SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (WARN_ON(failed == 0))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}

static void __seccomp_filter_orphan(struct seccomp_filter *orig)
{
	while (orig && refcount_dec_and_test(&orig->users)) {
		if (waitqueue_active(&orig->wqh))
			wake_up_poll(&orig->wqh, EPOLLHUP);
		orig = orig->prev;
	}
}

static void __put_seccomp_filter(struct seccomp_filter *orig)
{
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->refs)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

static void __seccomp_filter_release(struct seccomp_filter *orig)
{
	/* Notify about any unused filters in the task's former filter tree. */
	__seccomp_filter_orphan(orig);
	/* Finally drop all references to the task's former tree. */
	__put_seccomp_filter(orig);
}

/**
 * seccomp_filter_release - Detach the task from its filter tree,
 *			    drop its reference count, and notify
 *			    about unused filters
 *
 * This function should only be called when the task is exiting as
 * it detaches it from its filter tree. As such, READ_ONCE() and
 * barriers are not needed here, as would normally be needed.
 */
void seccomp_filter_release(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;

	/* We are effectively holding the siglock by not having any sighand. */
	WARN_ON(tsk->sighand != NULL);

	/* Detach task from its filter tree. */
	tsk->seccomp.filter = NULL;
	__seccomp_filter_release(orig);
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 *
 */
static inline void seccomp_sync_threads(unsigned long flags)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);

		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference. (This also
		 * allows a put before the assignment.)
		 */
		__seccomp_filter_release(thread->seccomp.filter);

		/* Make our new filter tree visible. */
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);
		/* Copy the caller's count so /proc reporting stays accurate. */
		atomic_set(&thread->seccomp.filter_count,
			   atomic_read(&caller->seccomp.filter_count));

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
					    flags);
	}
}

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig =
#if defined(CONFIG_CHECKPOINT_RESTORE) || defined(SECCOMP_ARCH_NATIVE)
		true;
#else
		false;
#endif

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	mutex_init(&sfilter->notify_lock);
	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	refcount_set(&sfilter->refs, 1);
	refcount_set(&sfilter->users, 1);
	init_waitqueue_head(&sfilter->wqh);

	return sfilter;
}

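/*
 * Illustrative userspace sketch (not in the original source) of the
 * no_new_privs path checked above, installing the one-instruction
 * allow-all program from the earlier example ("prog" as defined there):
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 *
 * Without PR_SET_NO_NEW_PRIVS (and without CAP_SYS_ADMIN in the user
 * namespace), seccomp_prepare_filter() fails with -EACCES.
 */
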
/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns a new filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}

#ifdef SECCOMP_ARCH_NATIVE
/**
 * seccomp_is_const_allow - check if filter is constant allow with given data
 * @fprog: The BPF programs
 * @sd: The seccomp data to check against, only syscall number and arch
 *      number are considered constant.
 */
static bool seccomp_is_const_allow(struct sock_fprog_kern *fprog,
				   struct seccomp_data *sd)
{
	unsigned int reg_value = 0;
	unsigned int pc;
	bool op_res;

	if (WARN_ON_ONCE(!fprog))
		return false;

	for (pc = 0; pc < fprog->len; pc++) {
		struct sock_filter *insn = &fprog->filter[pc];
		u16 code = insn->code;
		u32 k = insn->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			switch (k) {
			case offsetof(struct seccomp_data, nr):
				reg_value = sd->nr;
				break;
			case offsetof(struct seccomp_data, arch):
				reg_value = sd->arch;
				break;
			default:
				/* can't optimize (non-constant value load) */
				return false;
			}
			break;
		case BPF_RET | BPF_K:
			/* reached return with constant values only, check allow */
			return k == SECCOMP_RET_ALLOW;
		case BPF_JMP | BPF_JA:
			pc += insn->k;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_K:
			switch (BPF_OP(code)) {
			case BPF_JEQ:
				op_res = reg_value == k;
				break;
			case BPF_JGE:
				op_res = reg_value >= k;
				break;
			case BPF_JGT:
				op_res = reg_value > k;
				break;
			case BPF_JSET:
				op_res = !!(reg_value & k);
				break;
			default:
				/* can't optimize (unknown jump) */
				return false;
			}

			pc += op_res ? insn->jt : insn->jf;
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			reg_value &= k;
			break;
		default:
			/* can't optimize (unknown insn) */
			return false;
		}
	}

	/* ran off the end of the filter?! */
	WARN_ON(1);
	return false;
}

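/*
 * Illustrative walk-through (not in the original source): for the common
 * userspace pattern
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
 *	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *
 * emulating with sd->nr == __NR_getpid reaches RET ALLOW through constant
 * loads and compares only, so that syscall's bit stays set in the cache;
 * every other nr reaches RET ERRNO and has its bit cleared below.
 */
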
static void seccomp_cache_prepare_bitmap(struct seccomp_filter *sfilter,
					 void *bitmap, const void *bitmap_prev,
					 size_t bitmap_size, int arch)
{
	struct sock_fprog_kern *fprog = sfilter->prog->orig_prog;
	struct seccomp_data sd;
	int nr;

	if (bitmap_prev) {
		/* The new filter must be as restrictive as the last. */
		bitmap_copy(bitmap, bitmap_prev, bitmap_size);
	} else {
		/* Before any filters, all syscalls are always allowed. */
		bitmap_fill(bitmap, bitmap_size);
	}

	for (nr = 0; nr < bitmap_size; nr++) {
		/* No bitmap change: not a cacheable action. */
		if (!test_bit(nr, bitmap))
			continue;

		sd.nr = nr;
		sd.arch = arch;

		/* No bitmap change: continue to always allow. */
		if (seccomp_is_const_allow(fprog, &sd))
			continue;

		/*
		 * Not a cacheable action: always run filters.
		 * atomic clear_bit() not needed, filter not visible yet.
		 */
		__clear_bit(nr, bitmap);
	}
}

/**
 * seccomp_cache_prepare - emulate the filter to find cacheable syscalls
 * @sfilter: The seccomp filter
 */
static void seccomp_cache_prepare(struct seccomp_filter *sfilter)
{
	struct action_cache *cache = &sfilter->cache;
	const struct action_cache *cache_prev =
		sfilter->prev ? &sfilter->prev->cache : NULL;

	seccomp_cache_prepare_bitmap(sfilter, cache->allow_native,
				     cache_prev ? cache_prev->allow_native : NULL,
				     SECCOMP_ARCH_NATIVE_NR,
				     SECCOMP_ARCH_NATIVE);

#ifdef SECCOMP_ARCH_COMPAT
	seccomp_cache_prepare_bitmap(sfilter, cache->allow_compat,
				     cache_prev ? cache_prev->allow_compat : NULL,
				     SECCOMP_ARCH_COMPAT_NR,
				     SECCOMP_ARCH_COMPAT);
#endif /* SECCOMP_ARCH_COMPAT */
}
#endif /* SECCOMP_ARCH_NATIVE */

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error, or
 *   - in TSYNC mode: the pid of a thread which was either not in the correct
 *     seccomp mode or did not have an ancestral seccomp filter
 *   - in NEW_LISTENER mode: the fd of the new listener
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret) {
			if (flags & SECCOMP_FILTER_FLAG_TSYNC_ESRCH)
				return -ESRCH;
			else
				return ret;
		}
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	seccomp_cache_prepare(filter);
	current->seccomp.filter = filter;
	atomic_inc(&current->seccomp.filter_count);

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads(flags);

	return 0;
}

static void __get_seccomp_filter(struct seccomp_filter *filter)
{
	refcount_inc(&filter->refs);
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	__get_seccomp_filter(orig);
	refcount_inc(&orig->users);
}

static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason)
{
	clear_siginfo(info);
	info->si_signo = SIGSYS;
	info->si_code = SYS_SECCOMP;
	info->si_call_addr = (void __user *)KSTK_EIP(current);
	info->si_errno = reason;
	info->si_arch = syscall_get_arch(current);
	info->si_syscall = syscall;
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct kernel_siginfo info;
	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(&info);
}
#endif	/* CONFIG_SECCOMP_FILTER */

/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL_PROCESS	(1 << 0)
#define SECCOMP_LOG_KILL_THREAD		(1 << 1)
#define SECCOMP_LOG_TRAP		(1 << 2)
#define SECCOMP_LOG_ERRNO		(1 << 3)
#define SECCOMP_LOG_TRACE		(1 << 4)
#define SECCOMP_LOG_LOG			(1 << 5)
#define SECCOMP_LOG_ALLOW		(1 << 6)
#define SECCOMP_LOG_USER_NOTIF		(1 << 7)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
				    SECCOMP_LOG_KILL_THREAD  |
				    SECCOMP_LOG_TRAP  |
				    SECCOMP_LOG_ERRNO |
				    SECCOMP_LOG_USER_NOTIF |
				    SECCOMP_LOG_TRACE |
				    SECCOMP_LOG_LOG;

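/*
 * Illustrative note (not in the original source): on a kernel with this
 * default, the mask above is every action except "allow", which is what
 * the actions_logged sysctl reports:
 *
 *	$ cat /proc/sys/kernel/seccomp/actions_logged
 *	kill_process kill_thread trap errno user_notif trace log
 *
 * Removing a name from that file clears the matching SECCOMP_LOG_* bit.
 */
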
965 bool requested)
Tyler Hicks0ddec0f2017-08-11 04:33:54 +0000966{
967 bool log = false;
968
969 switch (action) {
970 case SECCOMP_RET_ALLOW:
Tyler Hickse66a3992017-08-11 04:33:56 +0000971 break;
Tyler Hicks0ddec0f2017-08-11 04:33:54 +0000972 case SECCOMP_RET_TRAP:
Tyler Hickse66a3992017-08-11 04:33:56 +0000973 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
974 break;
Tyler Hicks0ddec0f2017-08-11 04:33:54 +0000975 case SECCOMP_RET_ERRNO:
Tyler Hickse66a3992017-08-11 04:33:56 +0000976 log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
977 break;
Tyler Hicks0ddec0f2017-08-11 04:33:54 +0000978 case SECCOMP_RET_TRACE:
Tyler Hickse66a3992017-08-11 04:33:56 +0000979 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
Tyler Hicks0ddec0f2017-08-11 04:33:54 +0000980 break;
Tycho Andersen6a21cc52018-12-09 11:24:13 -0700981 case SECCOMP_RET_USER_NOTIF:
982 log = requested && seccomp_actions_logged & SECCOMP_LOG_USER_NOTIF;
983 break;
Tyler Hicks59f5cf42017-08-11 04:33:57 +0000984 case SECCOMP_RET_LOG:
985 log = seccomp_actions_logged & SECCOMP_LOG_LOG;
986 break;
Kees Cookfd768752017-08-11 12:53:18 -0700987 case SECCOMP_RET_KILL_THREAD:
Kees Cookfd768752017-08-11 12:53:18 -0700988 log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
Kees Cook4d3b0b02017-08-11 13:01:39 -0700989 break;
990 case SECCOMP_RET_KILL_PROCESS:
991 default:
992 log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
Tyler Hicks0ddec0f2017-08-11 04:33:54 +0000993 }
994
995 /*
Tyler Hicks326bee02018-05-04 01:08:15 +0000996 * Emit an audit message when the action is RET_KILL_*, RET_LOG, or the
997 * FILTER_FLAG_LOG bit was set. The admin has the ability to silence
998 * any action from being logged by removing the action name from the
999 * seccomp_actions_logged sysctl.
Tyler Hicks0ddec0f2017-08-11 04:33:54 +00001000 */
Tyler Hicks326bee02018-05-04 01:08:15 +00001001 if (!log)
1002 return;
Tyler Hicks0ddec0f2017-08-11 04:33:54 +00001003
Tyler Hicks326bee02018-05-04 01:08:15 +00001004 audit_seccomp(syscall, signr, action);
Tyler Hicks0ddec0f2017-08-11 04:33:54 +00001005}
1006
/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	-1, /* negative terminated */
};

static void __secure_computing_strict(int this_syscall)
{
	const int *allowed_syscalls = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		allowed_syscalls = get_compat_mode1_syscalls();
#endif
	do {
		if (*allowed_syscalls == this_syscall)
			return;
	} while (*++allowed_syscalls != -1);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
	do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static u64 seccomp_next_notify_id(struct seccomp_filter *filter)
{
	/*
	 * Note: overflow is ok here, the id just needs to be unique per
	 * filter.
	 */
	lockdep_assert_held(&filter->notify_lock);
	return filter->notif->next_id++;
}

static void seccomp_handle_addfd(struct seccomp_kaddfd *addfd)
{
	/*
	 * Remove the notification, and reset the list pointers, indicating
	 * that it has been handled.
	 */
	list_del_init(&addfd->list);
	addfd->ret = receive_fd_replace(addfd->fd, addfd->file, addfd->flags);
	complete(&addfd->completion);
}

static int seccomp_do_user_notification(int this_syscall,
					struct seccomp_filter *match,
					const struct seccomp_data *sd)
{
	int err;
	u32 flags = 0;
	long ret = 0;
	struct seccomp_knotif n = {};
	struct seccomp_kaddfd *addfd, *tmp;

	mutex_lock(&match->notify_lock);
	err = -ENOSYS;
	if (!match->notif)
		goto out;

	n.task = current;
	n.state = SECCOMP_NOTIFY_INIT;
	n.data = sd;
	n.id = seccomp_next_notify_id(match);
	init_completion(&n.ready);
	list_add(&n.list, &match->notif->notifications);
	INIT_LIST_HEAD(&n.addfd);

	up(&match->notif->request);
	wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
	mutex_unlock(&match->notify_lock);

	/*
	 * This is where we wait for a reply from userspace.
	 */
wait:
	err = wait_for_completion_interruptible(&n.ready);
	mutex_lock(&match->notify_lock);
	if (err == 0) {
		/* Check if we were woken up by an addfd message */
		addfd = list_first_entry_or_null(&n.addfd,
						 struct seccomp_kaddfd, list);
		if (addfd && n.state != SECCOMP_NOTIFY_REPLIED) {
			seccomp_handle_addfd(addfd);
			mutex_unlock(&match->notify_lock);
			goto wait;
		}
		ret = n.val;
		err = n.error;
		flags = n.flags;
	}

	/* If there were any pending addfd calls, clear them out */
	list_for_each_entry_safe(addfd, tmp, &n.addfd, list) {
		/* The process went away before we got a chance to handle it */
		addfd->ret = -ESRCH;
		list_del_init(&addfd->list);
		complete(&addfd->completion);
	}

	/*
	 * Note that it's possible the listener died in between the time when
	 * we were notified of a response (or a signal) and when we were able to
	 * re-acquire the lock, so only delete from the list if the
	 * notification actually exists.
	 *
	 * Also note that this test is only valid because there's no way to
	 * *reattach* to a notifier right now. If one is added, we'll need to
	 * keep track of the notif itself and make sure they match here.
	 */
	if (match->notif)
		list_del(&n.list);
out:
	mutex_unlock(&match->notify_lock);

	/* Userspace requests to continue the syscall. */
	if (flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE)
		return 0;

	syscall_set_return_value(current, current_pt_regs(),
				 err, ret);
	return -1;
}

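/*
 * Illustrative supervisor-side sketch (not in the original source) of the
 * loop this function is waiting on, using the uapi ioctls on a hypothetical
 * "listener" fd obtained via SECCOMP_FILTER_FLAG_NEW_LISTENER:
 *
 *	struct seccomp_notif req = {};
 *	struct seccomp_notif_resp resp = {};
 *
 *	ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);	// INIT -> SENT
 *	resp.id = req.id;
 *	resp.error = -EPERM;					// becomes n.error
 *	ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);	// -> REPLIED
 *
 * Setting resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE instead lets the
 * original syscall run, as handled at the end of this function.
 */
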
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	struct seccomp_filter *match = NULL;
	int data;
	struct seccomp_data sd_local;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after SYSCALL_WORK_SECCOMP was seen.
	 */
	smp_rmb();

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	filter_ret = seccomp_run_filters(sd, &match);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION_FULL;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, current_pt_regs(),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, current_pt_regs());
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 current_pt_regs(),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, current_pt_regs());
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_USER_NOTIF:
		if (seccomp_do_user_notification(this_syscall, match, sd))
			goto skip;

		return 0;

	case SECCOMP_RET_LOG:
		seccomp_log(this_syscall, 0, action, true);
		return 0;

	case SECCOMP_RET_ALLOW:
		/*
		 * Note that the "match" filter will always be NULL for
		 * this action since SECCOMP_RET_ALLOW is the starting
		 * state in seccomp_run_filters().
		 */
		return 0;

	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_KILL_PROCESS:
	default:
		seccomp_log(this_syscall, SIGSYS, action, true);
		/* Dump core only if this is the last remaining thread. */
		if (action != SECCOMP_RET_KILL_THREAD ||
		    get_nr_threads(current) == 1) {
			kernel_siginfo_t info;

			/* Show the original registers in the dump. */
			syscall_rollback(current, current_pt_regs());
			/* Trigger a manual coredump since do_exit skips it. */
			seccomp_init_siginfo(&info, this_syscall, data);
			do_coredump(&info);
		}
		if (action == SECCOMP_RET_KILL_THREAD)
			do_exit(SIGSYS);
		else
			do_group_exit(SIGSYS);
	}

	unreachable();

skip:
	seccomp_log(this_syscall, 0, action, match ? match->log : false);
	return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();

	return -1;
}
#endif

Kees Cook8112c4f2016-06-01 16:02:17 -07001292int __secure_computing(const struct seccomp_data *sd)
Andy Lutomirski13aa72f2014-07-21 18:49:15 -07001293{
1294 int mode = current->seccomp.mode;
Kees Cook8112c4f2016-06-01 16:02:17 -07001295 int this_syscall;
Andy Lutomirski13aa72f2014-07-21 18:49:15 -07001296
Masahiro Yamada97f26452016-08-03 13:45:50 -07001297 if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
Tycho Andersen13c4a902015-06-13 09:02:48 -06001298 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
Kees Cook8112c4f2016-06-01 16:02:17 -07001299 return 0;
1300
1301 this_syscall = sd ? sd->nr :
Denis Efremov2d9ca262020-08-24 15:59:21 +03001302 syscall_get_nr(current, current_pt_regs());
Tycho Andersen13c4a902015-06-13 09:02:48 -06001303
Andy Lutomirski13aa72f2014-07-21 18:49:15 -07001304 switch (mode) {
1305 case SECCOMP_MODE_STRICT:
1306 __secure_computing_strict(this_syscall); /* may call do_exit */
Kees Cook8112c4f2016-06-01 16:02:17 -07001307 return 0;
Andy Lutomirski13aa72f2014-07-21 18:49:15 -07001308 case SECCOMP_MODE_FILTER:
Kees Cookce6526e2016-06-01 19:29:15 -07001309 return __seccomp_filter(this_syscall, sd, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 default:
1311 BUG();
1312 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313}
Andy Lutomirskia4412fc2014-07-21 18:49:14 -07001314#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
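
/*
 * Illustrative sketch (not part of this file): an architecture's syscall
 * entry code is expected to consult seccomp roughly as below, skipping the
 * syscall whenever __secure_computing() returns -1. The helper names
 * arch_syscall_enter() and invoke_syscall() are hypothetical placeholders,
 * not real arch APIs; see each architecture's entry code for the real thing.
 *
 *	static void arch_syscall_enter(struct pt_regs *regs)
 *	{
 *		if (test_thread_flag(TIF_SECCOMP) &&
 *		    __secure_computing(NULL) == -1)
 *			return;			// syscall was skipped/denied
 *		invoke_syscall(regs);		// hypothetical dispatch helper
 *	}
 */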

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode, 0);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}
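
/*
 * Illustrative sketch (not part of this file): userspace enters strict mode
 * through either prctl(2) or seccomp(2). After this point only a handful of
 * syscalls (read, write, _exit, sigreturn) are permitted:
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *	// or, equivalently:
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL);
 */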

#ifdef CONFIG_SECCOMP_FILTER
static void seccomp_notify_free(struct seccomp_filter *filter)
{
	kfree(filter->notif);
	filter->notif = NULL;
}

static void seccomp_notify_detach(struct seccomp_filter *filter)
{
	struct seccomp_knotif *knotif;

	if (!filter)
		return;

	mutex_lock(&filter->notify_lock);

	/*
	 * If this file is being closed because e.g. the task who owned it
	 * died, let's wake everyone up who was waiting on us.
	 */
	list_for_each_entry(knotif, &filter->notif->notifications, list) {
		if (knotif->state == SECCOMP_NOTIFY_REPLIED)
			continue;

		knotif->state = SECCOMP_NOTIFY_REPLIED;
		knotif->error = -ENOSYS;
		knotif->val = 0;

		/*
		 * We do not need to wake up any pending addfd messages, as
		 * the notifier will do that for us, as this just looks
		 * like a standard reply.
		 */
		complete(&knotif->ready);
	}

	seccomp_notify_free(filter);
	mutex_unlock(&filter->notify_lock);
}

static int seccomp_notify_release(struct inode *inode, struct file *file)
{
	struct seccomp_filter *filter = file->private_data;

	seccomp_notify_detach(filter);
	__put_seccomp_filter(filter);
	return 0;
}

/* must be called with notif_lock held */
static inline struct seccomp_knotif *
find_notification(struct seccomp_filter *filter, u64 id)
{
	struct seccomp_knotif *cur;

	lockdep_assert_held(&filter->notify_lock);

	list_for_each_entry(cur, &filter->notif->notifications, list) {
		if (cur->id == id)
			return cur;
	}

	return NULL;
}

static long seccomp_notify_recv(struct seccomp_filter *filter,
				void __user *buf)
{
	struct seccomp_knotif *knotif = NULL, *cur;
	struct seccomp_notif unotif;
	ssize_t ret;

	/* Verify that we're not given garbage to keep the struct extensible. */
	ret = check_zeroed_user(buf, sizeof(unotif));
	if (ret < 0)
		return ret;
	if (!ret)
		return -EINVAL;

	memset(&unotif, 0, sizeof(unotif));

	ret = down_interruptible(&filter->notif->request);
	if (ret < 0)
		return ret;

	mutex_lock(&filter->notify_lock);
	list_for_each_entry(cur, &filter->notif->notifications, list) {
		if (cur->state == SECCOMP_NOTIFY_INIT) {
			knotif = cur;
			break;
		}
	}

	/*
	 * If we didn't find a notification, it could be that the task was
	 * interrupted by a fatal signal between the time we were woken and
	 * when we were able to acquire the notify_lock mutex.
	 */
	if (!knotif) {
		ret = -ENOENT;
		goto out;
	}

	unotif.id = knotif->id;
	unotif.pid = task_pid_vnr(knotif->task);
	unotif.data = *(knotif->data);

	knotif->state = SECCOMP_NOTIFY_SENT;
	wake_up_poll(&filter->wqh, EPOLLOUT | EPOLLWRNORM);
	ret = 0;
out:
	mutex_unlock(&filter->notify_lock);

	if (ret == 0 && copy_to_user(buf, &unotif, sizeof(unotif))) {
		ret = -EFAULT;

		/*
		 * Userspace screwed up. To make sure that we keep this
		 * notification alive, let's reset it back to INIT. It
		 * may have died when we released the lock, so we need to make
		 * sure it's still around.
		 */
		mutex_lock(&filter->notify_lock);
		knotif = find_notification(filter, unotif.id);
		if (knotif) {
			knotif->state = SECCOMP_NOTIFY_INIT;
			up(&filter->notif->request);
		}
		mutex_unlock(&filter->notify_lock);
	}

	return ret;
}
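
/*
 * Illustrative sketch (not part of this file): a userspace supervisor
 * typically blocks in this ioctl to pick up the next pending notification.
 * "listener" stands for the fd returned by SECCOMP_FILTER_FLAG_NEW_LISTENER,
 * and handle() is a hypothetical callback:
 *
 *	struct seccomp_notif req;
 *
 *	memset(&req, 0, sizeof(req));	// must be zeroed (see above)
 *	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req) == 0)
 *		handle(req.id, req.pid, &req.data);
 */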

static long seccomp_notify_send(struct seccomp_filter *filter,
				void __user *buf)
{
	struct seccomp_notif_resp resp = {};
	struct seccomp_knotif *knotif;
	long ret;

	if (copy_from_user(&resp, buf, sizeof(resp)))
		return -EFAULT;

	if (resp.flags & ~SECCOMP_USER_NOTIF_FLAG_CONTINUE)
		return -EINVAL;

	if ((resp.flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE) &&
	    (resp.error || resp.val))
		return -EINVAL;

	ret = mutex_lock_interruptible(&filter->notify_lock);
	if (ret < 0)
		return ret;

	knotif = find_notification(filter, resp.id);
	if (!knotif) {
		ret = -ENOENT;
		goto out;
	}

	/* Allow exactly one reply. */
	if (knotif->state != SECCOMP_NOTIFY_SENT) {
		ret = -EINPROGRESS;
		goto out;
	}

	ret = 0;
	knotif->state = SECCOMP_NOTIFY_REPLIED;
	knotif->error = resp.error;
	knotif->val = resp.val;
	knotif->flags = resp.flags;
	complete(&knotif->ready);
out:
	mutex_unlock(&filter->notify_lock);
	return ret;
}
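
/*
 * Illustrative sketch (not part of this file): the supervisor answers a
 * notification it received earlier, either emulating the syscall result or
 * (with SECCOMP_USER_NOTIF_FLAG_CONTINUE) letting the kernel run it:
 *
 *	struct seccomp_notif_resp resp = {
 *		.id = req.id,		// cookie from SECCOMP_IOCTL_NOTIF_RECV
 *		.error = -EPERM,	// or 0 with .val set to a return value
 *	};
 *
 *	ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
 */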

static long seccomp_notify_id_valid(struct seccomp_filter *filter,
				    void __user *buf)
{
	struct seccomp_knotif *knotif;
	u64 id;
	long ret;

	if (copy_from_user(&id, buf, sizeof(id)))
		return -EFAULT;

	ret = mutex_lock_interruptible(&filter->notify_lock);
	if (ret < 0)
		return ret;

	knotif = find_notification(filter, id);
	if (knotif && knotif->state == SECCOMP_NOTIFY_SENT)
		ret = 0;
	else
		ret = -ENOENT;

	mutex_unlock(&filter->notify_lock);
	return ret;
}
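
/*
 * Illustrative sketch (not part of this file): before acting on state read
 * from /proc/<pid>, a supervisor re-checks the cookie so it does not operate
 * on behalf of a task that has already died and had its identifiers reused:
 *
 *	u64 id = req.id;
 *
 *	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &id) == 0) {
 *		// the notifying task is still blocked in the syscall
 *	}
 */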

static long seccomp_notify_addfd(struct seccomp_filter *filter,
				 struct seccomp_notif_addfd __user *uaddfd,
				 unsigned int size)
{
	struct seccomp_notif_addfd addfd;
	struct seccomp_knotif *knotif;
	struct seccomp_kaddfd kaddfd;
	int ret;

	BUILD_BUG_ON(sizeof(addfd) < SECCOMP_NOTIFY_ADDFD_SIZE_VER0);
	BUILD_BUG_ON(sizeof(addfd) != SECCOMP_NOTIFY_ADDFD_SIZE_LATEST);

	if (size < SECCOMP_NOTIFY_ADDFD_SIZE_VER0 || size >= PAGE_SIZE)
		return -EINVAL;

	ret = copy_struct_from_user(&addfd, sizeof(addfd), uaddfd, size);
	if (ret)
		return ret;

	if (addfd.newfd_flags & ~O_CLOEXEC)
		return -EINVAL;

	if (addfd.flags & ~SECCOMP_ADDFD_FLAG_SETFD)
		return -EINVAL;

	if (addfd.newfd && !(addfd.flags & SECCOMP_ADDFD_FLAG_SETFD))
		return -EINVAL;

	kaddfd.file = fget(addfd.srcfd);
	if (!kaddfd.file)
		return -EBADF;

	kaddfd.flags = addfd.newfd_flags;
	kaddfd.fd = (addfd.flags & SECCOMP_ADDFD_FLAG_SETFD) ?
		    addfd.newfd : -1;
	init_completion(&kaddfd.completion);

	ret = mutex_lock_interruptible(&filter->notify_lock);
	if (ret < 0)
		goto out;

	knotif = find_notification(filter, addfd.id);
	if (!knotif) {
		ret = -ENOENT;
		goto out_unlock;
	}

	/*
	 * We do not want to allow for FD injection to occur before the
	 * notification has been picked up by a userspace handler, or after
	 * the notification has been replied to.
	 */
	if (knotif->state != SECCOMP_NOTIFY_SENT) {
		ret = -EINPROGRESS;
		goto out_unlock;
	}

	list_add(&kaddfd.list, &knotif->addfd);
	complete(&knotif->ready);
	mutex_unlock(&filter->notify_lock);

	/* Now we wait for it to be processed or be interrupted */
	ret = wait_for_completion_interruptible(&kaddfd.completion);
	if (ret == 0) {
		/*
		 * We had a successful completion. The other side has already
		 * removed us from the addfd queue, and
		 * wait_for_completion_interruptible has a memory barrier upon
		 * success that lets us read this value directly without
		 * locking.
		 */
		ret = kaddfd.ret;
		goto out;
	}

	mutex_lock(&filter->notify_lock);
	/*
	 * Even though we were woken up by a signal and not a successful
	 * completion, a completion may have happened in the mean time.
	 *
	 * We need to check again if the addfd request has been handled,
	 * and if not, we will remove it from the queue.
	 */
	if (list_empty(&kaddfd.list))
		ret = kaddfd.ret;
	else
		list_del(&kaddfd.list);

out_unlock:
	mutex_unlock(&filter->notify_lock);
out:
	fput(kaddfd.file);

	return ret;
}
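
/*
 * Illustrative sketch (not part of this file): the supervisor can install
 * one of its own file descriptors into the notifying task, e.g. to emulate
 * an open(2) it intercepted. The fd number allocated in the target comes
 * back as the ioctl return value:
 *
 *	struct seccomp_notif_addfd addfd = {
 *		.id = req.id,		// cookie of the pending notification
 *		.srcfd = my_fd,		// fd in the supervisor to copy over
 *		.newfd_flags = O_CLOEXEC,
 *	};
 *
 *	int remote_fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
 */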

static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct seccomp_filter *filter = file->private_data;
	void __user *buf = (void __user *)arg;

	/* Fixed-size ioctls */
	switch (cmd) {
	case SECCOMP_IOCTL_NOTIF_RECV:
		return seccomp_notify_recv(filter, buf);
	case SECCOMP_IOCTL_NOTIF_SEND:
		return seccomp_notify_send(filter, buf);
	case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR:
	case SECCOMP_IOCTL_NOTIF_ID_VALID:
		return seccomp_notify_id_valid(filter, buf);
	}

	/* Extensible Argument ioctls */
#define EA_IOCTL(cmd)	((cmd) & ~(IOC_INOUT | IOCSIZE_MASK))
	switch (EA_IOCTL(cmd)) {
	case EA_IOCTL(SECCOMP_IOCTL_NOTIF_ADDFD):
		return seccomp_notify_addfd(filter, buf, _IOC_SIZE(cmd));
	default:
		return -EINVAL;
	}
}

static __poll_t seccomp_notify_poll(struct file *file,
				    struct poll_table_struct *poll_tab)
{
	struct seccomp_filter *filter = file->private_data;
	__poll_t ret = 0;
	struct seccomp_knotif *cur;

	poll_wait(file, &filter->wqh, poll_tab);

	if (mutex_lock_interruptible(&filter->notify_lock) < 0)
		return EPOLLERR;

	list_for_each_entry(cur, &filter->notif->notifications, list) {
		if (cur->state == SECCOMP_NOTIFY_INIT)
			ret |= EPOLLIN | EPOLLRDNORM;
		if (cur->state == SECCOMP_NOTIFY_SENT)
			ret |= EPOLLOUT | EPOLLWRNORM;
		if ((ret & EPOLLIN) && (ret & EPOLLOUT))
			break;
	}

	mutex_unlock(&filter->notify_lock);

	if (refcount_read(&filter->users) == 0)
		ret |= EPOLLHUP;

	return ret;
}

static const struct file_operations seccomp_notify_ops = {
	.poll = seccomp_notify_poll,
	.release = seccomp_notify_release,
	.unlocked_ioctl = seccomp_notify_ioctl,
	.compat_ioctl = seccomp_notify_ioctl,
};

static struct file *init_listener(struct seccomp_filter *filter)
{
	struct file *ret;

	ret = ERR_PTR(-ENOMEM);
	filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL);
	if (!filter->notif)
		goto out;

	sema_init(&filter->notif->request, 0);
	filter->notif->next_id = get_random_u64();
	INIT_LIST_HEAD(&filter->notif->notifications);

	ret = anon_inode_getfile("seccomp notify", &seccomp_notify_ops,
				 filter, O_RDWR);
	if (IS_ERR(ret))
		goto out_notif;

	/* The file has a reference to it now */
	__get_seccomp_filter(filter);

out_notif:
	if (IS_ERR(ret))
		seccomp_notify_free(filter);
out:
	return ret;
}

/*
 * Does @new_child have a listener while an ancestor also has a listener?
 * If so, we'll want to reject this filter.
 * This only has to be tested for the current process, even in the TSYNC case,
 * because TSYNC installs @child with the same parent on all threads.
 * Note that @new_child is not hooked up to its parent at this point yet, so
 * we use current->seccomp.filter.
 */
static bool has_duplicate_listener(struct seccomp_filter *new_child)
{
	struct seccomp_filter *cur;

	/* must be protected against concurrent TSYNC */
	lockdep_assert_held(&current->sighand->siglock);

	if (!new_child->notif)
		return false;
	for (cur = current->seccomp.filter; cur; cur = cur->prev) {
		if (cur->notif)
			return true;
	}

	return false;
}

/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;
	int listener = -1;
	struct file *listener_f = NULL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/*
	 * In the successful case, NEW_LISTENER returns the new listener fd.
	 * But in the failure case, TSYNC returns the thread that died. If you
	 * combine these two flags, there's no way to tell whether something
	 * succeeded or failed. So, let's disallow this combination if the user
	 * has not explicitly requested no errors from TSYNC.
	 */
	if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
	    (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) &&
	    ((flags & SECCOMP_FILTER_FLAG_TSYNC_ESRCH) == 0))
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
		listener = get_unused_fd_flags(O_CLOEXEC);
		if (listener < 0) {
			ret = listener;
			goto out_free;
		}

		listener_f = init_listener(prepared);
		if (IS_ERR(listener_f)) {
			put_unused_fd(listener);
			ret = PTR_ERR(listener_f);
			goto out_free;
		}
	}

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_put_fd;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	if (has_duplicate_listener(prepared)) {
		ret = -EBUSY;
		goto out;
	}

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode, flags);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_put_fd:
	if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
		if (ret) {
			listener_f->private_data = NULL;
			fput(listener_f);
			put_unused_fd(listener);
			seccomp_notify_detach(prepared);
		} else {
			fd_install(listener, listener_f);
			ret = listener;
		}
	}
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif
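
/*
 * Illustrative sketch (not part of this file): installing a minimal filter
 * from userspace. The program below returns ENOSYS for one syscall and
 * allows everything else; no_new_privs must be set first unless the caller
 * has CAP_SYS_ADMIN in its user namespace:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_uname, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | ENOSYS),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 */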

static long seccomp_get_action_avail(const char __user *uaction)
{
	u32 action;

	if (copy_from_user(&action, uaction, sizeof(action)))
		return -EFAULT;

	switch (action) {
	case SECCOMP_RET_KILL_PROCESS:
	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_TRAP:
	case SECCOMP_RET_ERRNO:
	case SECCOMP_RET_USER_NOTIF:
	case SECCOMP_RET_TRACE:
	case SECCOMP_RET_LOG:
	case SECCOMP_RET_ALLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static long seccomp_get_notif_sizes(void __user *usizes)
{
	struct seccomp_notif_sizes sizes = {
		.seccomp_notif = sizeof(struct seccomp_notif),
		.seccomp_notif_resp = sizeof(struct seccomp_notif_resp),
		.seccomp_data = sizeof(struct seccomp_data),
	};

	if (copy_to_user(usizes, &sizes, sizeof(sizes)))
		return -EFAULT;

	return 0;
}
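
/*
 * Illustrative sketch (not part of this file): a forward-compatible
 * supervisor queries the kernel's structure sizes before allocating its
 * request/response buffers, since the structs may grow in newer kernels:
 *
 *	struct seccomp_notif_sizes sizes;
 *
 *	if (syscall(__NR_seccomp, SECCOMP_GET_NOTIF_SIZES, 0, &sizes) == 0) {
 *		// allocate at least sizes.seccomp_notif and
 *		// sizes.seccomp_notif_resp bytes for the recv/send buffers
 *	}
 */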

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       void __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	case SECCOMP_GET_ACTION_AVAIL:
		if (flags != 0)
			return -EINVAL;

		return seccomp_get_action_avail(uargs);
	case SECCOMP_GET_NOTIF_SIZES:
		if (flags != 0)
			return -EINVAL;

		return seccomp_get_notif_sizes(uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
			 void __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}
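
/*
 * Illustrative sketch (not part of this file): seccomp(2) is typically
 * invoked through syscall(2), since libc wrappers are not universally
 * available. For example, probing whether the running kernel knows a
 * given action:
 *
 *	__u32 action = SECCOMP_RET_USER_NOTIF;
 *
 *	if (syscall(__NR_seccomp, SECCOMP_GET_ACTION_AVAIL, 0, &action) == 0) {
 *		// this kernel supports SECCOMP_RET_USER_NOTIF
 *	}
 */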

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, void __user *filter)
{
	unsigned int op;
	void __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
static struct seccomp_filter *get_nth_filter(struct task_struct *task,
					     unsigned long filter_off)
{
	struct seccomp_filter *orig, *filter;
	unsigned long count;

	/*
	 * Note: this is only correct because the caller should be the (ptrace)
	 * tracer of the task, otherwise lock_task_sighand is needed.
	 */
	spin_lock_irq(&task->sighand->siglock);

	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		spin_unlock_irq(&task->sighand->siglock);
		return ERR_PTR(-EINVAL);
	}

	orig = task->seccomp.filter;
	__get_seccomp_filter(orig);
	spin_unlock_irq(&task->sighand->siglock);

	count = 0;
	for (filter = orig; filter; filter = filter->prev)
		count++;

	if (filter_off >= count) {
		filter = ERR_PTR(-ENOENT);
		goto out;
	}

	count -= filter_off;
	for (filter = orig; filter && count > 1; filter = filter->prev)
		count--;

	if (WARN_ON(count != 1 || !filter)) {
		filter = ERR_PTR(-ENOENT);
		goto out;
	}

	__get_seccomp_filter(filter);

out:
	__put_seccomp_filter(orig);
	return filter;
}

long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	filter = get_nth_filter(task, filter_off);
	if (IS_ERR(filter))
		return PTR_ERR(filter);

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

out:
	__put_seccomp_filter(filter);
	return ret;
}
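
/*
 * Illustrative sketch (not part of this file): a checkpoint/restore tool
 * dumps a stopped tracee's filters through ptrace(2). Passing a NULL data
 * pointer first returns the instruction count so the buffer can be sized:
 *
 *	long len = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, NULL);
 *	struct sock_filter *insns = calloc(len, sizeof(*insns));
 *
 *	ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, insns);
 */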

long seccomp_get_metadata(struct task_struct *task,
			  unsigned long size, void __user *data)
{
	long ret;
	struct seccomp_filter *filter;
	struct seccomp_metadata kmd = {};

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	size = min_t(unsigned long, size, sizeof(kmd));

	if (size < sizeof(kmd.filter_off))
		return -EINVAL;

	if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
		return -EFAULT;

	filter = get_nth_filter(task, kmd.filter_off);
	if (IS_ERR(filter))
		return PTR_ERR(filter);

	if (filter->log)
		kmd.flags |= SECCOMP_FILTER_FLAG_LOG;

	ret = size;
	if (copy_to_user(data, &kmd, size))
		ret = -EFAULT;

	__put_seccomp_filter(filter);
	return ret;
}
#endif

#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME	"kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME	"kill_thread"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_USER_NOTIF_NAME	"user_notif"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] =
				SECCOMP_RET_KILL_PROCESS_NAME	" "
				SECCOMP_RET_KILL_THREAD_NAME	" "
				SECCOMP_RET_TRAP_NAME		" "
				SECCOMP_RET_ERRNO_NAME		" "
				SECCOMP_RET_USER_NOTIF_NAME	" "
				SECCOMP_RET_TRACE_NAME		" "
				SECCOMP_RET_LOG_NAME		" "
				SECCOMP_RET_ALLOW_NAME;

struct seccomp_log_name {
	u32		log;
	const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
	{ SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
	{ SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
	{ SECCOMP_LOG_USER_NOTIF, SECCOMP_RET_USER_NOTIF_NAME },
	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
	{ }
};

static bool seccomp_names_from_actions_logged(char *names, size_t size,
					      u32 actions_logged,
					      const char *sep)
{
	const struct seccomp_log_name *cur;
	bool append_sep = false;

	for (cur = seccomp_log_names; cur->name && size; cur++) {
		ssize_t ret;

		if (!(actions_logged & cur->log))
			continue;

		if (append_sep) {
			ret = strscpy(names, sep, size);
			if (ret < 0)
				return false;

			names += ret;
			size -= ret;
		} else
			append_sep = true;

		ret = strscpy(names, cur->name, size);
		if (ret < 0)
			return false;

		names += ret;
		size -= ret;
	}

	return true;
}

static bool seccomp_action_logged_from_name(u32 *action_logged,
					    const char *name)
{
	const struct seccomp_log_name *cur;

	for (cur = seccomp_log_names; cur->name; cur++) {
		if (!strcmp(cur->name, name)) {
			*action_logged = cur->log;
			return true;
		}
	}

	return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
	char *name;

	*actions_logged = 0;
	while ((name = strsep(&names, " ")) && *name) {
		u32 action_logged = 0;

		if (!seccomp_action_logged_from_name(&action_logged, name))
			return false;

		*actions_logged |= action_logged;
	}

	return true;
}

static int read_actions_logged(struct ctl_table *ro_table, void *buffer,
			       size_t *lenp, loff_t *ppos)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;

	memset(names, 0, sizeof(names));

	if (!seccomp_names_from_actions_logged(names, sizeof(names),
					       seccomp_actions_logged, " "))
		return -EINVAL;

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	return proc_dostring(&table, 0, buffer, lenp, ppos);
}

static int write_actions_logged(struct ctl_table *ro_table, void *buffer,
				size_t *lenp, loff_t *ppos, u32 *actions_logged)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(names, 0, sizeof(names));

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	ret = proc_dostring(&table, 1, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (!seccomp_actions_logged_from_names(actions_logged, table.data))
		return -EINVAL;

	if (*actions_logged & SECCOMP_LOG_ALLOW)
		return -EINVAL;

	seccomp_actions_logged = *actions_logged;
	return 0;
}

static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
				 int ret)
{
	char names[sizeof(seccomp_actions_avail)];
	char old_names[sizeof(seccomp_actions_avail)];
	const char *new = names;
	const char *old = old_names;

	if (!audit_enabled)
		return;

	memset(names, 0, sizeof(names));
	memset(old_names, 0, sizeof(old_names));

	if (ret)
		new = "?";
	else if (!actions_logged)
		new = "(none)";
	else if (!seccomp_names_from_actions_logged(names, sizeof(names),
						    actions_logged, ","))
		new = "?";

	if (!old_actions_logged)
		old = "(none)";
	else if (!seccomp_names_from_actions_logged(old_names,
						    sizeof(old_names),
						    old_actions_logged, ","))
		old = "?";

	return audit_seccomp_actions_logged(new, old, !ret);
}

static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret;

	if (write) {
		u32 actions_logged = 0;
		u32 old_actions_logged = seccomp_actions_logged;

		ret = write_actions_logged(ro_table, buffer, lenp, ppos,
					   &actions_logged);
		audit_actions_logged(actions_logged, old_actions_logged, ret);
	} else
		ret = read_actions_logged(ro_table, buffer, lenp, ppos);

	return ret;
}

static struct ctl_path seccomp_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "seccomp", },
	{ }
};

static struct ctl_table seccomp_sysctl_table[] = {
	{
		.procname	= "actions_avail",
		.data		= (void *) &seccomp_actions_avail,
		.maxlen		= sizeof(seccomp_actions_avail),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "actions_logged",
		.mode		= 0644,
		.proc_handler	= seccomp_actions_logged_handler,
	},
	{ }
};

static int __init seccomp_sysctl_init(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
	if (!hdr)
		pr_warn("sysctl registration failed\n");
	else
		kmemleak_not_leak(hdr);

	return 0;
}

device_initcall(seccomp_sysctl_init)
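
/*
 * Illustrative sketch (not part of this file): the resulting sysctl files
 * live under /proc/sys/kernel/seccomp/. For example, from a shell:
 *
 *	$ cat /proc/sys/kernel/seccomp/actions_avail
 *	kill_process kill_thread trap errno user_notif trace log allow
 *	# echo "kill_process kill_thread errno" > \
 *		/proc/sys/kernel/seccomp/actions_logged
 */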

#endif /* CONFIG_SYSCTL */

#ifdef CONFIG_SECCOMP_CACHE_DEBUG
/* Currently CONFIG_SECCOMP_CACHE_DEBUG implies SECCOMP_ARCH_NATIVE */
static void proc_pid_seccomp_cache_arch(struct seq_file *m, const char *name,
					const void *bitmap, size_t bitmap_size)
{
	int nr;

	for (nr = 0; nr < bitmap_size; nr++) {
		bool cached = test_bit(nr, bitmap);
		char *status = cached ? "ALLOW" : "FILTER";

		seq_printf(m, "%s %d %s\n", name, nr, status);
	}
}

int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
			   struct pid *pid, struct task_struct *task)
{
	struct seccomp_filter *f;
	unsigned long flags;

	/*
	 * We don't want some sandboxed process to know what their seccomp
	 * filters consist of.
	 */
	if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EACCES;

	if (!lock_task_sighand(task, &flags))
		return -ESRCH;

	f = READ_ONCE(task->seccomp.filter);
	if (!f) {
		unlock_task_sighand(task, &flags);
		return 0;
	}

	/* prevent filter from being freed while we are printing it */
	__get_seccomp_filter(f);
	unlock_task_sighand(task, &flags);

	proc_pid_seccomp_cache_arch(m, SECCOMP_ARCH_NATIVE_NAME,
				    f->cache.allow_native,
				    SECCOMP_ARCH_NATIVE_NR);

#ifdef SECCOMP_ARCH_COMPAT
	proc_pid_seccomp_cache_arch(m, SECCOMP_ARCH_COMPAT_NAME,
				    f->cache.allow_compat,
				    SECCOMP_ARCH_COMPAT_NR);
#endif /* SECCOMP_ARCH_COMPAT */

	__put_seccomp_filter(f);
	return 0;
}
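
/*
 * Illustrative sketch (not part of this file): with this debug option
 * enabled, each line of /proc/<pid>/seccomp_cache names an architecture,
 * a syscall number, and whether the bitmap marks it as always-allowed.
 * Example output (values will vary per task and filter):
 *
 *	$ cat /proc/1/seccomp_cache
 *	x86_64 0 ALLOW
 *	x86_64 1 ALLOW
 *	x86_64 2 FILTER
 */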
#endif /* CONFIG_SECCOMP_CACHE_DEBUG */