// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_SPINLOCK(zones_lock);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	u16 zone;
	u32 ref;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct nf_flowtable_type flowtable_ct = {
	.owner = THIS_MODULE,
};

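/* Flow tables are shared per conntrack zone: the first ct action
 * instance in a zone allocates the table, later instances only take
 * a reference.  zones_lock serializes lookup/insert here against
 * teardown in tcf_ct_flow_table_put().
 */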
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	spin_lock_bh(&zones_lock);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft)
		goto take_ref;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_ATOMIC);
	if (!ct_ft)
		goto err_alloc;

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
take_ref:
	params->ct_ft = ct_ft;
	ct_ft->ref++;
	spin_unlock_bh(&zones_lock);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	spin_unlock_bh(&zones_lock);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	spin_lock_bh(&zones_lock);
	if (--params->ct_ft->ref == 0) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
	spin_unlock_bh(&zones_lock);
}

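/* Offload an established connection to the zone's flow table so that
 * later packets can bypass conntrack.  IPS_OFFLOAD_BIT guards against
 * inserting the same conntrack entry twice; for TCP, liberal window
 * tracking is enabled because conntrack state goes stale once most of
 * the flow bypasses it.
 */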
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

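/* Gate offload: only fully established TCP or UDP connections are
 * candidates, and connections that need per-packet conntrack work
 * (helpers, TCP sequence adjustment) are never offloaded.
 */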
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

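/* Build a flow table lookup key from the packet headers.  The checks
 * below deliberately mirror the flow table software fast path: no
 * fragments, no IPv4 options or IPv6 extension headers, TCP or UDP
 * only, and enough TTL/hop limit left to forward.
 */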
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return false;

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return false;

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

static bool tcf_ct_flow_table_check_tcp(struct flow_offload *flow,
					struct sk_buff *skb,
					unsigned int thoff)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
		return false;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return false;
	}

	return true;
}

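/* Fast path of the ct action: look the packet up in the zone's flow
 * table and, on a hit, attach the cached conntrack entry to the skb
 * without calling nf_conntrack_in().  TCP FIN or RST tears the flow
 * down so that connection teardown is tracked by conntrack again.
 */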
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct flow_offload *flow;
	struct nf_conn *ct;
	unsigned int thoff;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple))
			return false;
		thoff = ip_hdr(skb)->ihl * 4;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple))
			return false;
		thoff = sizeof(struct ipv6hdr);
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	/* Use the L4 protocol from the filled tuple rather than re-reading
	 * the IPv4 header, which would be wrong for IPv6 packets.
	 */
	if (tuple.l4proto == IPPROTO_TCP &&
	    !tcf_ct_flow_table_check_tcp(flow, skb, thoff))
		return false;

	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

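/* Reassemble IP fragments before conntrack, which operates on whole
 * L3 datagrams.  A return value of -EINPROGRESS means the fragment
 * was queued by the defrag engine and the skb must be treated as
 * stolen by the caller.
 */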
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			goto out_free;
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		/* fall through */
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;	/* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

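/* Datapath entry point of the ct action.  In outline: clear conntrack
 * state if requested, reassemble fragments, try the flow table fast
 * path, otherwise run the packet through nf_conntrack_in(), then
 * apply NAT, mark and labels, and confirm the connection when
 * committing.
 *
 * Illustrative usage with iproute2 (example only, adapted from the
 * tc-ct(8) style of recirculating via chains; not part of this file):
 *
 *   tc filter add dev eth0 ingress prio 1 chain 0 proto ip flower \
 *	ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress prio 1 chain 1 proto ip flower \
 *	ct_state +trk+est action ct zone 1 pipe \
 *	action mirred egress redirect dev eth1
 */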
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	} else if (!skip_add) {
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_IPV6_MAX] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

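/* Parse the NAT related netlink attributes into an nf_nat_range2.
 * An omitted MAX attribute defaults to the corresponding MIN value,
 * so a single-address or single-port mapping only needs MIN.
 */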
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

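/* Control path: parse attributes, create or update the action
 * instance, and swap in the new parameter block under tcf_lock.  The
 * old parameters are released after an RCU grace period so that the
 * datapath never sees a half-updated state.
 */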
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	/* Free via tcf_ct_params_free() rather than a bare kfree_rcu():
	 * the old parameters hold a flow table reference and possibly a
	 * zone template that must be released on replace as well.
	 */
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
	if (res == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

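/* Stats callback, used to fold counters gathered elsewhere (e.g. by
 * drivers that offloaded the action to hardware, hence the hw flag)
 * into the action's software stats.
 */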
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
			     u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, false, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.size		=	sizeof(struct tcf_ct),
};

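/* Per-netns setup: reserve space for the full 128-bit connlabels area
 * up front so actions using TCA_CT_LABELS can rely on the extension.
 * Failure is not fatal; label support is simply disabled for this
 * netns.
 */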
static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

	/* Unwind in reverse order of initialization: a failed
	 * tcf_ct_flow_tables_init() must not be followed by
	 * tcf_ct_flow_tables_uninit(), and a failed registration must
	 * still destroy the workqueue.
	 */
err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");