// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */
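
/*
 * Example usage from user space (illustrative only; exact iproute2
 * syntax and supported flags vary by version):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress bpf obj prog.o sec classifier direct-action
 */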

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

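/* Per-classifier instance: the RCU-protected list of attached programs
 * plus an IDR that allocates and tracks their handles.
 */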
struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

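/* Direct-action programs return a TC verdict directly. Map any unknown
 * return code to TC_ACT_UNSPEC so that classification simply continues
 * with the next program in the list.
 */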
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

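/* Fast path: run each attached program over the skb in list order. At
 * ingress the MAC header is pushed back first so programs see the full
 * frame. In direct-action (exts_integrated) mode the program's return
 * value is the TC verdict itself; otherwise 0 means no match, -1 selects
 * the configured default tcf_result, and any other value is used as the
 * classid, after which the attached actions run.
 */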
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}

	return ret;
}

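/* Classic BPF programs always carry a bpf_ops array; eBPF programs do not. */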
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

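/* Issue a single offload request to the block's callbacks: replace when
 * both prog and oldprog are given, add when only prog is, destroy when
 * only oldprog is. On failure for a new program the partial install is
 * rolled back by re-issuing the command with the arguments swapped, and
 * a skip_sw program that reached no hardware at all is rejected.
 */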
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

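/* Normalize prog/oldprog before issuing the offload command: a replace
 * may not change the skip_sw/skip_hw flags, and filters marked skip_hw
 * are treated as absent on the hardware side.
 */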
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

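/* Filters are unlinked under RTNL, but the fast path may still be
 * traversing the list under RCU. When the netns reference can still be
 * taken, the final free is therefore deferred to a workqueue after a
 * grace period; otherwise the netns is already gone and the program can
 * be freed immediately.
 */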
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

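/* Build a classic BPF filter from the TCA_BPF_OPS{,_LEN} attributes:
 * validate the instruction count, copy the opcode array out of the
 * netlink message, and hand it to bpf_prog_create() for the usual
 * checks and JIT.
 */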
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

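/* Take a reference on an already loaded eBPF program via the file
 * descriptor in TCA_BPF_FD, remember its optional user-supplied name
 * for dumps, and keep the block's dst entries alive if the program
 * needs them.
 */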
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

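/* Parse and validate the netlink attributes for a new or replaced
 * filter: exactly one of classic BPF ops or an eBPF fd must be given,
 * the flag sets are checked, and the classid binding is set up last.
 */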
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, u32 flags,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, flags,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

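/* Create or replace a filter. A zero handle means "allocate one"; for a
 * new filter with an explicit handle the IDR reserves exactly that
 * value. On replace, the new program takes the old one's place in both
 * the IDR and the RCU list, and the old program is freed after a grace
 * period.
 */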
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, u32 flags,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], flags,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

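/* Dump one filter to user space, refreshing hardware stats first so
 * that tc sees up-to-date counters for offloaded programs.
 */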
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &prog->res, base);
		else
			__tcf_unbind_filter(q, &prog->res);
	}
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

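/* Replay add or destroy offload requests for every non-skip_hw program
 * against a single callback, used when a hardware block binds to or
 * unbinds from the tcf block at runtime.
 */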
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);