/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
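
/* Example usage from user space (illustrative only; the device, object
 * file and section names below are placeholders):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf obj cls.o sec classifier da
 *
 * The second command attaches the eBPF program from section "classifier"
 * of cls.o in direct-action ("da") mode, which corresponds to the
 * TCA_BPF_FLAG_ACT_DIRECT flag handled below.
 */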

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
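
/* Sanitize the return code of a direct-action program: opcodes the
 * classifier knows how to handle are passed through verbatim, anything
 * else is mapped to TC_ACT_UNSPEC, which makes the caller continue with
 * the next filter in the list.
 */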
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
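
/* Main classification path, run under RCU for each attached program in
 * list order. The program's return value is interpreted as follows: with
 * integrated (direct-action) extensions it is a TC action opcode; without
 * them, 0 means no match, -1 selects the classid configured via
 * TCA_BPF_CLASSID, and any other value is taken as the classid itself.
 * At ingress the MAC header is pushed back first, since skb->data points
 * past it there and the program expects to see the full frame.
 */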
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}
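
/* Issue a single offload command to the drivers bound to this block. The
 * operation is encoded via the prog/oldprog pair: both non-NULL requests
 * a replace, only prog an add, only oldprog a destroy. If installing the
 * new program fails, the call is replayed with the arguments swapped to
 * roll the hardware back to the old program.
 */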
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
				   extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog)
		tcf_block_offload_dec(block, &oldprog->gen_flags);

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
			return err;
		} else if (err > 0) {
			tcf_block_offload_inc(block, &prog->gen_flags);
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}
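
/* Filter teardown is deferred twice: call_rcu() first ensures that no
 * classify path can still see the program, and the RCU callback then
 * punts the actual freeing to a workqueue, because releasing the exts
 * requires taking the RTNL mutex, which must not be taken from (softirq)
 * RCU callback context.
 */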
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}
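
/* Set up a classic BPF filter: the instruction array passed in
 * TCA_BPF_OPS is copied and handed to bpf_prog_create(), which validates
 * it and internally converts it into an (optionally JITed) eBPF program.
 */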
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}
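
/* Set up an eBPF filter from a program fd that user space obtained via
 * bpf(BPF_PROG_LOAD). The fd must refer to a BPF_PROG_TYPE_SCHED_CLS
 * program; for SKIP_SW filters a device-bound (offloaded) program is
 * also acceptable.
 */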
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}
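
/* Create or replace a filter instance. A handle of zero asks the kernel
 * to allocate one from the IDR; otherwise the requested handle is
 * reserved, except on replace, where the old program already owns it.
 * The new program is fully set up and offloaded before being spliced
 * into the RCU-protected list.
 */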
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);