// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

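/* Per-zone flow table. One instance is shared by all ct action instances
 * that use the same conntrack zone; instances are refcounted and kept in
 * zones_ht, keyed by zone id.
 */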
struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following nat helper functions check if the inverted reverse tuple
 * (target) is different than the current dir tuple - meaning nat for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

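/* Translate an offloaded flow in the given tuple direction into a
 * flow_action rule: first the NAT mangle entries (if any), then the CT
 * metadata entry. On failure, the entries added so far are cleared.
 */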
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action = tcf_ct_flow_table_fill_actions,
	.owner = THIS_MODULE,
};

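/* Look up the zone's flow table under zones_mutex, taking a reference if
 * one already exists; otherwise allocate a new table, insert it into
 * zones_ht and initialize the underlying nf_flowtable with HW offload
 * enabled.
 */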
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

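/* Offload an established connection into the zone's flow table. The
 * IPS_OFFLOAD_BIT guards against adding the same conntrack entry twice;
 * for TCP, window tracking is set to liberal mode since offloaded packets
 * bypass the conntrack TCP state machine.
 */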
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

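/* Software fast path: parse the packet tuple and look it up in the zone's
 * flow table. On a hit, restore the conntrack entry and ctinfo onto the
 * skb directly, skipping the full conntrack lookup. TCP FIN/RST packets
 * tear down the offloaded flow and fall back to the slow path.
 */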
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

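/* Defragment IPv4/IPv6 packets before conntrack sees them. Returns
 * -EINPROGRESS when the fragment was queued for reassembly (the caller
 * then steals the packet), 0 when the skb is ready for processing.
 */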
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			goto out_free;
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		/* fall through */
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

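/* Apply the action's NAT configuration to the packet. For established
 * connections the manip type is derived from the conntrack status and
 * packet direction; for new connections it follows the requested
 * TCA_CT_ACT_NAT_SRC/DST. When both SNAT and DNAT are in effect, the
 * second translation is applied with the opposite manip type.
 */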
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP; /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction. Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

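/* Datapath entry point of the ct action. Handles the clear case, pulls
 * the skb to L3, defragments, then either restores cached/offloaded
 * conntrack state or runs the packet through nf_conntrack_in(), applies
 * NAT, and on commit sets mark/labels and confirms the connection.
 */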
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header into the linear area.
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	} else if (!skip_add) {
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_IPV6_MAX] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

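/* Parse the NAT netlink attributes into an nf_nat_range2. An address or
 * port "max" attribute defaults to the corresponding "min" value when
 * omitted.
 */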
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels aren't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones aren't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

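/* Control path: create or replace a ct action instance. Parameters are
 * built into a new tcf_ct_params and swapped in under tcf_lock via RCU,
 * so the datapath never sees a half-updated configuration.
 */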
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
	if (res == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

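/* Dump the action configuration back to userspace. Attributes whose mask
 * is all-zero are skipped by tcf_ct_dump_key_val(), and NAT attributes
 * are only emitted when NAT is configured.
 */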
static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index = c->tcf_index,
		.refcnt = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
			     u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, false, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind = "ct",
	.id = TCA_ID_CT,
	.owner = THIS_MODULE,
	.act = tcf_ct_act,
	.dump = tcf_ct_dump,
	.init = tcf_ct_init,
	.cleanup = tcf_ct_cleanup,
	.walk = tcf_ct_walker,
	.lookup = tcf_ct_search,
	.stats_update = tcf_stats_update,
	.size = sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");