// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <daniel@iogearbox.net>
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include "preload/bpf_preload.h"

enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
	BPF_TYPE_LINK,
};

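/* Bump the refcount of a to-be-pinned object according to its type.
 * Maps are taken with a user reference, so the pinned file counts as
 * a userspace holder of the map.
 */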
static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_inc_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_inc(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_put(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

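/* Translate an fd into the object it refers to, probing in order: map,
 * program, then link. On success a reference is taken and *type is set;
 * an fd matching none of the three yields -EINVAL.
 */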
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	raw = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_MAP;
		return raw;
	}

	raw = bpf_prog_get(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		return raw;
	}

	raw = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_LINK;
		return raw;
	}

	return ERR_PTR(-EINVAL);
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops = { };

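/* Allocate an inode for a bpffs object. Only regular files, directories
 * and symlinks are valid modes; new_inode() failure is reported as
 * -ENOSPC.
 */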
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(&init_user_ns, inode, dir, mode);

	return inode;
}

static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;
	else
		return -EACCES;

	return 0;
}

static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}

static int bpf_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
		     struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

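/* seq_file plumbing implementing read(2) on a pinned map: a "cat" of a
 * pinned map walks all keys via ->map_get_next_key() and prints each
 * element through the map's ->map_seq_show_elem() callback.
 */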
struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}

static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}

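/* Advance the iterator: the first call passes prev_key == NULL to fetch
 * the map's first key. ->map_get_next_key() runs under rcu_read_lock()
 * since map elements may be freed concurrently with the read.
 */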
static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	rcu_read_lock();
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		key = NULL;
	}
	rcu_read_unlock();
	return key;
}

static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}

static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};

static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}

static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}

/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map. The purpose is to
 * provide a simple, intuitive way for users to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};

static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};

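/* Shared constructor for pinned object inodes: create the inode in
 * @dentry's parent, install the given inode/file operations and stash
 * the raw object pointer in i_private for later retrieval.
 */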
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}

static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_link *link = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     bpf_link_is_iter(link) ?
			     &bpf_iter_fops : &bpffs_obj_fops);
}

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions. That allows populate_bpffs() to create special files.
	 */
	if ((dir->i_mode & S_IALLUGO) &&
	    strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}

static int bpf_symlink(struct user_namespace *mnt_userns, struct inode *dir,
		       struct dentry *dentry, const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

/* pin iterator link into bpffs */
static int bpf_iter_link_pin_kernel(struct dentry *parent,
				    const char *name, struct bpf_link *link)
{
	umode_t mode = S_IFREG | S_IRUSR;
	struct dentry *dentry;
	int ret;

	inode_lock(parent->d_inode);
	dentry = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(dentry)) {
		inode_unlock(parent->d_inode);
		return PTR_ERR(dentry);
	}
	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
			    &bpf_iter_fops);
	dput(dentry);
	inode_unlock(parent->d_inode);
	return ret;
}

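/* Create the pinning dentry at @pathname. The parent directory must be
 * on a bpffs mount (identified by its i_op), otherwise -EPERM; the
 * object inode itself is created via vfs_mkobj() with a type-specific
 * constructor.
 */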
static int bpf_obj_do_pin(const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());

	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}

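/* Entry point of the BPF_OBJ_PIN command of the bpf(2) syscall. As an
 * illustrative userspace sketch only (error handling omitted, prog_fd
 * is a previously obtained program fd, path is hypothetical):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.bpf_fd = prog_fd;
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 */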
int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(pathname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);

	return ret;
}

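/* Resolve a pinned object by pathname: check permissions for the
 * requested access mode, derive the object type from the inode's i_op
 * and take a reference on the underlying object.
 */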
static void *bpf_obj_do_get(const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = path_permission(&path, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}

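/* Entry point of the BPF_OBJ_GET command: hand out a new fd for a
 * pinned object. Illustrative userspace sketch only (path is
 * hypothetical, error handling omitted):
 *
 *	union bpf_attr attr = {};
 *	int fd;
 *
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.file_flags = 0;
 *	fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */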
int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
	else
		return -ENOENT;

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}

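/* In-kernel path-based lookup of a pinned program: check read permission
 * on the inode, verify it actually pins a program (not a map or link)
 * and that the program type matches, then take a reference.
 * bpf_prog_get_type_path() below is the exported wrapper for kernel
 * code that attaches programs by pin path.
 */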
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(&init_user_ns, inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);

/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}

static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};

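/* A single "mode" option (octal) controls the permission bits of the
 * bpffs root, e.g. (illustrative):
 *
 *	mount -t bpf -o mode=0700 bpf /sys/fs/bpf
 */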
enum {
	OPT_MODE,
};

static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode",	OPT_MODE),
	{}
};

struct bpf_mount_opts {
	umode_t mode;
};

static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct bpf_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
	if (opt < 0) {
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		if (opt == -ENOPARAM) {
			opt = vfs_parse_fs_param_source(fc, param);
			if (opt != -ENOPARAM)
				return opt;

			return 0;
		}

		if (opt < 0)
			return opt;
	}

	switch (opt) {
	case OPT_MODE:
		opts->mode = result.uint_32 & S_IALLUGO;
		break;
	}

	return 0;
}

struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);

static bool bpf_preload_mod_get(void)
{
	/* If bpf_preload.ko wasn't loaded earlier then load it now.
	 * When bpf_preload is built into vmlinux the module's __init
	 * function will populate it.
	 */
	if (!bpf_preload_ops) {
		request_module("bpf_preload");
		if (!bpf_preload_ops)
			return false;
	}
	/* And grab the reference, so the module doesn't disappear while the
	 * kernel is interacting with the kernel module and its UMD.
	 */
	if (!try_module_get(bpf_preload_ops->owner)) {
		pr_err("bpf_preload module get failed.\n");
		return false;
	}
	return true;
}

static void bpf_preload_mod_put(void)
{
	if (bpf_preload_ops)
		/* now user can "rmmod bpf_preload" if necessary */
		module_put(bpf_preload_ops->owner);
}

static DEFINE_MUTEX(bpf_preload_lock);

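/* Called for every new bpffs instance: have the bpf_preload handler
 * (module or built-in) load the default BPF iterators and pin their
 * links in the new root, so files such as "progs.debug" and
 * "maps.debug" show up in each mount.
 */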
static int populate_bpffs(struct dentry *parent)
{
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
	struct bpf_link *links[BPF_PRELOAD_LINKS] = {};
	int err = 0, i;

	/* grab the mutex to make sure the kernel interactions with bpf_preload
	 * UMD are serialized
	 */
	mutex_lock(&bpf_preload_lock);

	/* if bpf_preload.ko wasn't built into vmlinux then load it */
	if (!bpf_preload_mod_get())
		goto out;

	if (!bpf_preload_ops->info.tgid) {
		/* preload() will start UMD that will load BPF iterator programs */
		err = bpf_preload_ops->preload(objs);
		if (err)
			goto out_put;
		for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
			links[i] = bpf_link_by_id(objs[i].link_id);
			if (IS_ERR(links[i])) {
				err = PTR_ERR(links[i]);
				goto out_put;
			}
		}
		for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
			err = bpf_iter_link_pin_kernel(parent,
						       objs[i].link_name, links[i]);
			if (err)
				goto out_put;
			/* do not unlink successfully pinned links even
			 * if later link fails to pin
			 */
			links[i] = NULL;
		}
		/* finish() will tell UMD process to exit */
		err = bpf_preload_ops->finish();
		if (err)
			goto out_put;
	}
out_put:
	bpf_preload_mod_put();
out:
	mutex_unlock(&bpf_preload_lock);
	for (i = 0; i < BPF_PRELOAD_LINKS && err; i++)
		if (!IS_ERR_OR_NULL(links[i]))
			bpf_link_put(links[i]);
	return err;
}

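/* Build the root of a new bpffs superblock. Note the ordering: the
 * root's permission bits are cleared before populate_bpffs() and only
 * restored afterwards, which is what lets bpf_lookup() accept the
 * dotted names of the preloaded special files.
 */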
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;
	struct inode *inode;
	int ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	populate_bpffs(sb->s_root);
	inode->i_mode |= S_ISVTX | opts->mode;
	return 0;
}

static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}

static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};

/*
 * Set up the filesystem mount context.
 */
static int bpf_init_fs_context(struct fs_context *fc)
{
	struct bpf_mount_opts *opts;

	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	opts->mode = S_IRWXUGO;

	fc->fs_private = opts;
	fc->ops = &bpf_context_ops;
	return 0;
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= kill_litter_super,
};

static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);