// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

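/* act_ct looks packets up in conntrack, optionally commits them and applies
 * NAT, mark and label changes, and offloads established connections to a
 * per-zone nf_flowtable so that subsequent packets can bypass the full
 * conntrack path, in software here and in hardware via flowtable offload.
 */
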
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct nf_ct_ext_type act_ct_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_act_ct_ext),
	.align		= __alignof__(struct nf_conn_act_ct_ext),
	.id		= NF_CT_EXT_ACT_CT,
};

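/* The helpers below translate the NAT and metadata state of an offloaded
 * conntrack entry into generic flow_action entries (FLOW_ACTION_MANGLE,
 * FLOW_ACTION_CT_METADATA) that flowtable offload drivers consume.
 */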
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helpers check whether the inverted reverse tuple
 * (target) differs from the current dir tuple - meaning NAT for ports
 * and/or IP is needed - and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

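/* Fill the FLOW_ACTION_CT_METADATA entry: conntrack mark, labels and a
 * cookie encoding the ct pointer and ctinfo, which drivers can use to
 * restore skb->_nfct on packets received from hardware (see nf_ct_set()).
 */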
175static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
176 enum ip_conntrack_dir dir,
177 struct flow_action *action)
178{
179 struct nf_conn_labels *ct_labels;
180 struct flow_action_entry *entry;
Paul Blakey30b0cf92020-03-12 12:23:07 +0200181 enum ip_conntrack_info ctinfo;
Paul Blakey9c26ba92020-03-12 12:23:06 +0200182 u32 *act_ct_labels;
183
184 entry = tcf_ct_flow_table_flow_action_get_next(action);
185 entry->id = FLOW_ACTION_CT_METADATA;
186#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
187 entry->ct_metadata.mark = ct->mark;
188#endif
Paul Blakey30b0cf92020-03-12 12:23:07 +0200189 ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
190 IP_CT_ESTABLISHED_REPLY;
191 /* aligns with the CT reference on the SKB nf_ct_set */
192 entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
Paul Blakey941eff52021-01-27 16:32:46 +0200193 entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;
Paul Blakey9c26ba92020-03-12 12:23:06 +0200194
195 act_ct_labels = entry->ct_metadata.labels;
196 ct_labels = nf_ct_labels_find(ct);
197 if (ct_labels)
198 memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
199 else
200 memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
201}
202
203static int tcf_ct_flow_table_add_action_nat(struct net *net,
204 struct nf_conn *ct,
205 enum ip_conntrack_dir dir,
206 struct flow_action *action)
207{
208 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
209 struct nf_conntrack_tuple target;
210
wenxu05aa69e2020-05-30 13:54:51 +0800211 if (!(ct->status & IPS_NAT_MASK))
212 return 0;
213
Paul Blakey9c26ba92020-03-12 12:23:06 +0200214 nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
215
216 switch (tuple->src.l3num) {
217 case NFPROTO_IPV4:
218 tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
219 action);
220 break;
221 case NFPROTO_IPV6:
222 tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
223 action);
224 break;
225 default:
226 return -EOPNOTSUPP;
227 }
228
229 switch (nf_ct_protonum(ct)) {
230 case IPPROTO_TCP:
231 tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
232 break;
233 case IPPROTO_UDP:
234 tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
235 break;
236 default:
237 return -EOPNOTSUPP;
238 }
239
240 return 0;
241}
242
243static int tcf_ct_flow_table_fill_actions(struct net *net,
244 const struct flow_offload *flow,
245 enum flow_offload_tuple_dir tdir,
246 struct nf_flow_rule *flow_rule)
247{
248 struct flow_action *action = &flow_rule->rule->action;
249 int num_entries = action->num_entries;
250 struct nf_conn *ct = flow->ct;
251 enum ip_conntrack_dir dir;
252 int i, err;
253
254 switch (tdir) {
255 case FLOW_OFFLOAD_DIR_ORIGINAL:
256 dir = IP_CT_DIR_ORIGINAL;
257 break;
258 case FLOW_OFFLOAD_DIR_REPLY:
259 dir = IP_CT_DIR_REPLY;
260 break;
261 default:
262 return -EOPNOTSUPP;
263 }
264
265 err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
266 if (err)
267 goto err_nat;
268
269 tcf_ct_flow_table_add_action_meta(ct, dir, action);
270 return 0;
271
272err_nat:
273 /* Clear filled actions */
274 for (i = num_entries; i < action->num_entries; i++)
275 memset(&action->entries[i], 0, sizeof(action->entries[i]));
276 action->num_entries = num_entries;
277
278 return err;
279}
280
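/* A single nf_flowtable_type is shared by all zones; its ->action callback
 * is invoked by the flowtable offload core to build the rule for a flow.
 */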
static struct nf_flowtable_type flowtable_ct = {
	.action		= tcf_ct_flow_table_fill_actions,
	.owner		= THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

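/* Teardown runs from a workqueue after an RCU grace period
 * (queue_rcu_work()), so datapath lookups still in flight cannot see a
 * freed flow table.
 */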
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct flow_block_cb *block_cb, *tmp_cb;
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	/* Remove any remaining callbacks before cleanup */
	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct nf_conn_act_ct_ext *act_ct_ext;
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	act_ct_ext = nf_conn_act_ct_ext_find(ct);
	if (act_ct_ext) {
		entry->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
			act_ct_ext->ifindex[IP_CT_DIR_ORIGINAL];
		entry->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
			act_ct_ext->ifindex[IP_CT_DIR_REPLY];
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) ||
	    !test_bit(IPS_ASSURED_BIT, &ct->status))
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

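/* The two helpers below parse just enough of an IPv4/IPv6 + TCP/UDP header
 * to build a flow_offload_tuple for the software fast-path lookup; anything
 * unusual (fragments, IP options, TTL/hop limit <= 1) bails out so the
 * packet takes the full conntrack path instead.
 */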
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

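/* Fast path: if the packet matches an offloaded flow, restore conntrack
 * state directly from the flow entry and skip nf_conntrack_in() entirely.
 * TCP FIN/RST tears the flow down so connection close still goes through
 * the regular conntrack state machine.
 */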
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

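/* Conntrack needs complete packets, so fragments are reassembled here
 * before the lookup; ip_defrag()/nf_ct_frag6_gather() return -EINPROGRESS
 * while they hold queued fragments, which the caller maps to TC_ACT_STOLEN.
 */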
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	mru = tc_skb_cb(skb)->mru;

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	if (err != -EINPROGRESS)
		tc_skb_cb(skb)->mru = mru;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;	/* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ct_nat_execute(skb, ct, ctinfo, range,
					     maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ct_nat_execute(skb, ct, ctinfo, NULL,
					     NF_NAT_MANIP_SRC);
		}
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

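/* Main datapath entry point. Runs in softirq context under RCU; the action
 * parameters are read via rcu_dereference_bh() and may be replaced
 * concurrently by tcf_ct_init().
 */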
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out_clear;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		if (!nf_ct_is_confirmed(ct))
			nf_conn_act_ct_ext_add(ct);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

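/* Illustrative only - a typical iproute2 recirculation setup that exercises
 * this action (see tc-ct(8) for the authoritative syntax):
 *
 *   tc filter add dev eth0 ingress prio 1 chain 0 proto ip flower \
 *	ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress prio 1 chain 1 proto ip flower \
 *	ct_state +trk+est action mirred egress redirect dev eth1
 */
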
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

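/* Describe this action to the flow offload API so that classifiers
 * (e.g. flower) can hand it to drivers as a FLOW_ACTION_CT entry.
 */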
static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.offload_act_setup =	tcf_ct_offload_act_setup,
	.size		=	sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	err = nf_ct_extend_register(&act_ct_extend);
	if (err)
		goto err_register_extend;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register_extend:
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	nf_ct_extend_unregister(&act_ct_extend);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");