// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/act_sample.c - Packet sampling tc action
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_sample.h>
#include <net/tc_act/tc_sample.h>
#include <net/psample.h>
#include <net/pkt_cls.h>

#include <linux/if_arp.h>

static unsigned int sample_net_id;
static struct tc_action_ops act_sample_ops;

static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
        [TCA_SAMPLE_PARMS]         = { .len = sizeof(struct tc_sample) },
        [TCA_SAMPLE_RATE]          = { .type = NLA_U32 },
        [TCA_SAMPLE_TRUNC_SIZE]    = { .type = NLA_U32 },
        [TCA_SAMPLE_PSAMPLE_GROUP] = { .type = NLA_U32 },
};

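/* Control path. Userspace (typically iproute2) creates this action with a
 * mandatory sample rate and psample group, for example (illustrative command,
 * device name and numbers are just an example):
 *
 *     tc filter add dev eth0 ingress matchall \
 *             action sample rate 100 group 5 trunc 128
 *
 * which arrives here as TCA_SAMPLE_RATE = 100, TCA_SAMPLE_PSAMPLE_GROUP = 5
 * and TCA_SAMPLE_TRUNC_SIZE = 128, validated against sample_policy above.
 */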
static int tcf_sample_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           struct tcf_proto *tp,
                           u32 flags, struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, sample_net_id);
        bool bind = flags & TCA_ACT_FLAGS_BIND;
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
        struct psample_group *psample_group;
        u32 psample_group_num, rate, index;
        struct tcf_chain *goto_ch = NULL;
        struct tc_sample *parm;
        struct tcf_sample *s;
        bool exists = false;
        int ret, err;

        if (!nla)
                return -EINVAL;
        ret = nla_parse_nested_deprecated(tb, TCA_SAMPLE_MAX, nla,
                                          sample_policy, NULL);
        if (ret < 0)
                return ret;
        if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
            !tb[TCA_SAMPLE_PSAMPLE_GROUP])
                return -EINVAL;

        parm = nla_data(tb[TCA_SAMPLE_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
                                     &act_sample_ops, bind, true, flags);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
        if (!rate) {
                NL_SET_ERR_MSG(extack, "invalid sample rate");
                err = -EINVAL;
                goto put_chain;
        }
        psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
        psample_group = psample_group_get(net, psample_group_num);
        if (!psample_group) {
                err = -ENOMEM;
                goto put_chain;
        }

        s = to_sample(*a);

        spin_lock_bh(&s->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        s->rate = rate;
        s->psample_group_num = psample_group_num;
        psample_group = rcu_replace_pointer(s->psample_group, psample_group,
                                            lockdep_is_held(&s->tcf_lock));

        if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
                s->truncate = true;
                s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
        }
        spin_unlock_bh(&s->tcf_lock);

        if (psample_group)
                psample_group_put(psample_group);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

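/* ->cleanup(): runs once the last reference to the action is gone, so the
 * psample group can be detached and released without taking tcf_lock.
 */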
static void tcf_sample_cleanup(struct tc_action *a)
{
        struct tcf_sample *s = to_sample(a);
        struct psample_group *psample_group;

        /* last reference to action, no need to lock */
        psample_group = rcu_dereference_protected(s->psample_group, 1);
        RCU_INIT_POINTER(s->psample_group, NULL);
        if (psample_group)
                psample_group_put(psample_group);
}

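/* Re-pushing the mac header on ingress only makes sense for device types
 * that carry an L2 header; tunnel and header-less device types are skipped.
 */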
static bool tcf_sample_dev_ok_push(struct net_device *dev)
{
        switch (dev->type) {
        case ARPHRD_TUNNEL:
        case ARPHRD_TUNNEL6:
        case ARPHRD_SIT:
        case ARPHRD_IPGRE:
        case ARPHRD_IP6GRE:
        case ARPHRD_VOID:
        case ARPHRD_NONE:
                return false;
        default:
                return true;
        }
}

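/* Datapath. Runs for every packet hitting the filter: with rate N, on
 * average one in N packets is delivered to the configured psample group
 * (optionally truncated to trunc_size) along with ingress/egress ifindex
 * metadata. The packet itself always continues with the configured control
 * action (s->tcf_action).
 */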
static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        struct tcf_sample *s = to_sample(a);
        struct psample_group *psample_group;
        struct psample_metadata md = {};
        int retval;

        tcf_lastuse_update(&s->tcf_tm);
        bstats_update(this_cpu_ptr(s->common.cpu_bstats), skb);
        retval = READ_ONCE(s->tcf_action);

        psample_group = rcu_dereference_bh(s->psample_group);

        /* randomly sample packets according to rate */
        if (psample_group && (prandom_u32() % s->rate == 0)) {
                if (!skb_at_tc_ingress(skb)) {
                        md.in_ifindex = skb->skb_iif;
                        md.out_ifindex = skb->dev->ifindex;
                } else {
                        md.in_ifindex = skb->dev->ifindex;
                }

                /* on ingress, the mac header gets popped, so push it back */
                if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
                        skb_push(skb, skb->mac_len);

                md.trunc_size = s->truncate ? s->trunc_size : skb->len;
                psample_sample_packet(psample_group, skb, s->rate, &md);

                if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
                        skb_pull(skb, skb->mac_len);
        }

        return retval;
}

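/* Fold counters reported for this action (from hardware when hw is true)
 * into the action's statistics and keep the most recent lastuse timestamp.
 */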
static void tcf_sample_stats_update(struct tc_action *a, u64 bytes, u64 packets,
                                    u64 drops, u64 lastuse, bool hw)
{
        struct tcf_sample *s = to_sample(a);
        struct tcf_t *tm = &s->tcf_tm;

        tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

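/* Dump the action's configuration back to userspace. tcf_lock is held so
 * the action, rate, truncation and group values form a consistent snapshot.
 */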
static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
                           int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_sample *s = to_sample(a);
        struct tc_sample opt = {
                .index   = s->tcf_index,
                .refcnt  = refcount_read(&s->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&s->tcf_bindcnt) - bind,
        };
        struct tcf_t t;

        spin_lock_bh(&s->tcf_lock);
        opt.action = s->tcf_action;
        if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &s->tcf_tm);
        if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate))
                goto nla_put_failure;

        if (s->truncate)
                if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size))
                        goto nla_put_failure;

        if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
                goto nla_put_failure;
        spin_unlock_bh(&s->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&s->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
                             struct netlink_callback *cb, int type,
                             const struct tc_action_ops *ops,
                             struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, sample_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, sample_net_id);

        return tcf_idr_search(tn, a, index);
}

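/* Offloading drivers resolve the psample group via ->get_psample_group.
 * A reference is taken with tcf_lock held and dropped later through the
 * destructor returned alongside the group.
 */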
static void tcf_psample_group_put(void *priv)
{
        struct psample_group *group = priv;

        psample_group_put(group);
}

static struct psample_group *
tcf_sample_get_group(const struct tc_action *a,
                     tc_action_priv_destructor *destructor)
{
        struct tcf_sample *s = to_sample(a);
        struct psample_group *group;

        group = rcu_dereference_protected(s->psample_group,
                                          lockdep_is_held(&s->tcf_lock));
        if (group) {
                psample_group_take(group);
                *destructor = tcf_psample_group_put;
        }

        return group;
}

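/* Translate the action into a flow_action_entry for hardware offload: on
 * bind, the entry carries the rate, truncation settings and a referenced
 * psample group; otherwise only the action id is reported.
 */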
static void tcf_offload_sample_get_group(struct flow_action_entry *entry,
                                         const struct tc_action *act)
{
        entry->sample.psample_group =
                act->ops->get_psample_group(act, &entry->destructor);
        entry->destructor_priv = entry->sample.psample_group;
}

static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data,
                                        u32 *index_inc, bool bind)
{
        if (bind) {
                struct flow_action_entry *entry = entry_data;

                entry->id = FLOW_ACTION_SAMPLE;
                entry->sample.trunc_size = tcf_sample_trunc_size(act);
                entry->sample.truncate = tcf_sample_truncate(act);
                entry->sample.rate = tcf_sample_rate(act);
                tcf_offload_sample_get_group(entry, act);
                *index_inc = 1;
        } else {
                struct flow_offload_action *fl_action = entry_data;

                fl_action->id = FLOW_ACTION_SAMPLE;
        }

        return 0;
}

static struct tc_action_ops act_sample_ops = {
        .kind              = "sample",
        .id                = TCA_ID_SAMPLE,
        .owner             = THIS_MODULE,
        .act               = tcf_sample_act,
        .stats_update      = tcf_sample_stats_update,
        .dump              = tcf_sample_dump,
        .init              = tcf_sample_init,
        .cleanup           = tcf_sample_cleanup,
        .walk              = tcf_sample_walker,
        .lookup            = tcf_sample_search,
        .get_psample_group = tcf_sample_get_group,
        .offload_act_setup = tcf_sample_offload_act_setup,
        .size              = sizeof(struct tcf_sample),
};

static __net_init int sample_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, sample_net_id);

        return tc_action_net_init(net, tn, &act_sample_ops);
}

static void __net_exit sample_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, sample_net_id);
}

static struct pernet_operations sample_net_ops = {
        .init       = sample_init_net,
        .exit_batch = sample_exit_net,
        .id         = &sample_net_id,
        .size       = sizeof(struct tc_action_net),
};

static int __init sample_init_module(void)
{
        return tcf_register_action(&act_sample_ops, &sample_net_ops);
}

static void __exit sample_cleanup_module(void)
{
        tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}

module_init(sample_init_module);
module_exit(sample_cleanup_module);

MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
MODULE_DESCRIPTION("Packet sampling action");
MODULE_LICENSE("GPL v2");