// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>
static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};
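
/* Flow tables are shared per conntrack zone: tcf_ct_flow_table_get()
 * below looks the zone up in zones_ht under zones_mutex and takes a
 * reference when a table already exists, so every ct action instance
 * in the same zone offloads into a single nf_flowtable.
 */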

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}
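
/* Note the inversion above: callers pass the bits they want rewritten
 * (0xFFFFFFFF/0xFFFF), while flow_action mangle entries appear to follow
 * the pedit-style convention where set mask bits are kept from the old
 * value, hence ->mangle.mask stores ~mask.
 */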

/* The following nat helper functions check if the inverted reverse tuple
 * (target) is different from the current dir tuple - meaning nat for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}
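
/* For illustration (made-up example values, not from this file): source
 * NAT of 10.0.0.1:1000 to 5.5.5.5:2000 over TCP produces two mangle
 * entries for the original direction - one rewriting iphdr->saddr, one
 * rewriting tcphdr->source - while the reply direction gets the inverse
 * daddr/dest rewrites derived from the inverted reverse tuple.
 */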

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}
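
/* The FLOW_ACTION_CT_METADATA entry carries the conntrack state (mark,
 * labels, and the ct pointer packed with ctinfo into ->cookie, exactly
 * as nf_ct_set() stores it in skb->_nfct) so a driver consuming the
 * rule can restore that state on packets it hands back to software.
 */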

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action = tcf_ct_flow_table_fill_actions,
	.owner = THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct flow_block_cb *block_cb, *tmp_cb;
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	/* Remove any remaining callbacks before cleanup */
	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}
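
/* The final put defers teardown through queue_rcu_work() so concurrent
 * datapath users still walking the flow table under RCU (see
 * tcf_ct_flow_table_lookup() below) have drained before
 * nf_flow_table_free() and kfree() run from the workqueue.
 */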

static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) ||
	    !test_bit(IPS_ASSURED_BIT, &ct->status))
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}
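
/* Only connections that are established and assured - TCP additionally
 * in the ESTABLISHED state (marked be-liberal to relax window checks
 * once offloaded) or plain UDP - and that need no helper or TCP
 * sequence adjustment, are pushed into the flow table above.
 */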

static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}
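
/* tcf_ct_flow_table_lookup() is the software fast path: when the tuple
 * hits the flow table, the skb is associated with the offloaded
 * conntrack entry directly and the full nf_conntrack_in() call in
 * tcf_ct_act() is skipped (the caller sets skip_add). TCP FIN/RST tears
 * the offloaded flow down so conntrack sees the connection close.
 */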

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}
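
/* Example (illustrative lengths): a minimum-size Ethernet frame carrying
 * a 40-byte IPv4/TCP packet arrives with trailing pad bytes, so after
 * pulling to the network header skb->len exceeds tot_len;
 * pskb_trim_rcsum() above cuts it back to the 40 bytes the IP header
 * declares.
 */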

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	mru = tc_skb_cb(skb)->mru;

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	if (err != -EINPROGRESS)
		tc_skb_cb(skb)->mru = mru;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}
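
/* ip_defrag()/nf_ct_frag6_gather() consume the skb and return
 * -EINPROGRESS while reassembly is incomplete; tcf_ct_act() maps that to
 * TC_ACT_STOLEN. On successful reassembly *defrag is set so the caller
 * can fix up qdisc pkt_len, and the recorded mru (frag_max_size) keeps
 * the largest fragment size for later refragmentation on output.
 */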

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP; /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction. Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ct_nat_execute(skb, ct, ctinfo, range,
					     maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ct_nat_execute(skb, ct, ctinfo, NULL,
					     NF_NAT_MANIP_SRC);
		}
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}
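
/* When a connection carries both SNAT and DNAT, a single manip pass is
 * not enough, so ct_nat_execute() above is invoked a second time with
 * the other manip type; the IP_CT_DIR_ORIGINAL branch additionally
 * applies the (null) source binding for DNAT-only connections - this is
 * an interpretation of the code, kept here as a reading aid.
 */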

static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out_clear;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}
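
/* For illustration (made-up device names, zone and chain numbers): the
 * datapath above is what rules along the lines of
 *
 *   tc filter add dev eth0 ingress chain 0 proto ip flower \
 *       ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress chain 1 proto ip flower \
 *       ct_state +trk+new action ct zone 1 commit pipe \
 *       action mirred egress redirect dev eth1
 *
 * exercise: the first pass sends untracked packets through conntrack and
 * recirculates, the second matches the resulting ct_state and commits.
 */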

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}
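
/* Parameter updates above are RCU-managed: the new tcf_ct_params is
 * swapped in under tcf_lock with rcu_replace_pointer() while
 * tcf_ct_act() readers use rcu_dereference_bh(), and the old set
 * (including its zone flow table reference and conntrack template) is
 * freed via call_rcu() through tcf_ct_params_free().
 */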

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}

static struct tc_action_ops act_ct_ops = {
	.kind		= "ct",
	.id		= TCA_ID_CT,
	.owner		= THIS_MODULE,
	.act		= tcf_ct_act,
	.dump		= tcf_ct_dump,
	.init		= tcf_ct_init,
	.cleanup	= tcf_ct_cleanup,
	.walk		= tcf_ct_walker,
	.lookup		= tcf_ct_search,
	.stats_update	= tcf_stats_update,
	.offload_act_setup = tcf_ct_offload_act_setup,
	.size		= sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");