// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 Nicira, Inc.
 */

#include <linux/module.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
#include <net/ip.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#endif

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"

struct ovs_ct_len_tbl {
	int maxlen;
	int minlen;
};

/* Metadata mark for masked write to conntrack mark */
struct md_mark {
	u32 value;
	u32 mask;
};

/* Metadata label for masked write to conntrack label. */
struct md_labels {
	struct ovs_key_ct_labels value;
	struct ovs_key_ct_labels mask;
};

enum ovs_ct_nat {
	OVS_CT_NAT = 1 << 0,     /* NAT for committed connections only. */
	OVS_CT_SRC_NAT = 1 << 1, /* Source NAT for NEW connections. */
	OVS_CT_DST_NAT = 1 << 2, /* Destination NAT for NEW connections. */
};

/* Conntrack action context for execution. */
struct ovs_conntrack_info {
	struct nf_conntrack_helper *helper;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	u8 commit : 1;
	u8 nat : 3;                 /* enum ovs_ct_nat */
	u8 force : 1;
	u8 have_eventmask : 1;
	u16 family;
	u32 eventmask;              /* Mask of 1 << IPCT_*. */
	struct md_mark mark;
	struct md_labels labels;
	char timeout[CTNL_TIMEOUT_NAME_MAX];
	struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
	struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
#endif
};

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
#define OVS_CT_LIMIT_UNLIMITED 0
#define OVS_CT_LIMIT_DEFAULT OVS_CT_LIMIT_UNLIMITED
#define CT_LIMIT_HASH_BUCKETS 512
static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);

struct ovs_ct_limit {
	/* Elements in ovs_ct_limit_info->limits hash table */
	struct hlist_node hlist_node;
	struct rcu_head rcu;
	u16 zone;
	u32 limit;
};

struct ovs_ct_limit_info {
	u32 default_limit;
	struct hlist_head *limits;
	struct nf_conncount_data *data;
};

static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
	[OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
};
#endif

static bool labels_nonzero(const struct ovs_key_ct_labels *labels);

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);

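/* Map the flow key's Ethernet type to the netfilter L3 protocol family. */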
static u16 key_to_nfproto(const struct sw_flow_key *key)
{
	switch (ntohs(key->eth.type)) {
	case ETH_P_IP:
		return NFPROTO_IPV4;
	case ETH_P_IPV6:
		return NFPROTO_IPV6;
	default:
		return NFPROTO_UNSPEC;
	}
}

/* Map SKB connection state into the values used by flow definition. */
static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
{
	u8 ct_state = OVS_CS_F_TRACKED;

	switch (ctinfo) {
	case IP_CT_ESTABLISHED_REPLY:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_REPLY_DIR;
		break;
	default:
		break;
	}

	switch (ctinfo) {
	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		ct_state |= OVS_CS_F_ESTABLISHED;
		break;
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_RELATED;
		break;
	case IP_CT_NEW:
		ct_state |= OVS_CS_F_NEW;
		break;
	default:
		break;
	}

	return ct_state;
}

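/* Return the conntrack mark of 'ct', or 0 if conntrack marks are not
 * compiled in (or 'ct' is NULL).
 */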
static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	return ct ? ct->mark : 0;
#else
	return 0;
#endif
}

/* Guard against conntrack labels max size shrinking below 128 bits. */
#if NF_CT_LABELS_MAX_SIZE < 16
#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
#endif

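/* Copy the connection labels of 'ct' into 'labels', zeroing them if the
 * connection has no label extension.
 */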
static void ovs_ct_get_labels(const struct nf_conn *ct,
			      struct ovs_key_ct_labels *labels)
{
	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;

	if (cl)
		memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
	else
		memset(labels, 0, OVS_CT_LABELS_LEN);
}

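/* Fill in the original direction L4 ports, or ICMP type/code, from 'orig'. */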
static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
					const struct nf_conntrack_tuple *orig,
					u8 icmp_proto)
{
	key->ct_orig_proto = orig->dst.protonum;
	if (orig->dst.protonum == icmp_proto) {
		key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
		key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
	} else {
		key->ct.orig_tp.src = orig->src.u.all;
		key->ct.orig_tp.dst = orig->dst.u.all;
	}
}

static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
				const struct nf_conntrack_zone *zone,
				const struct nf_conn *ct)
{
	key->ct_state = state;
	key->ct_zone = zone->id;
	key->ct.mark = ovs_ct_get_mark(ct);
	ovs_ct_get_labels(ct, &key->ct.labels);

	if (ct) {
		const struct nf_conntrack_tuple *orig;

		/* Use the master if we have one. */
		if (ct->master)
			ct = ct->master;
		orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

		/* IP version must match with the master connection. */
		if (key->eth.type == htons(ETH_P_IP) &&
		    nf_ct_l3num(ct) == NFPROTO_IPV4) {
			key->ipv4.ct_orig.src = orig->src.u3.ip;
			key->ipv4.ct_orig.dst = orig->dst.u3.ip;
			__ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
			return;
		} else if (key->eth.type == htons(ETH_P_IPV6) &&
			   !sw_flow_key_is_nd(key) &&
			   nf_ct_l3num(ct) == NFPROTO_IPV6) {
			key->ipv6.ct_orig.src = orig->src.u3.in6;
			key->ipv6.ct_orig.dst = orig->dst.u3.in6;
			__ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
			return;
		}
	}
	/* Clear 'ct_orig_proto' to mark the non-existence of conntrack
	 * original direction key fields.
	 */
	key->ct_orig_proto = 0;
}

/* Update 'key' based on skb->_nfct.  If 'post_ct' is true, then OVS has
 * previously sent the packet to conntrack via the ct action.  If
 * 'keep_nat_flags' is true, the existing NAT flags are retained, else they are
 * initialized from the connection status.
 */
static void ovs_ct_update_key(const struct sk_buff *skb,
			      const struct ovs_conntrack_info *info,
			      struct sw_flow_key *key, bool post_ct,
			      bool keep_nat_flags)
{
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	u8 state = 0;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		state = ovs_ct_get_state(ctinfo);
		/* All unconfirmed entries are NEW connections. */
		if (!nf_ct_is_confirmed(ct))
			state |= OVS_CS_F_NEW;
		/* OVS persists the related flag for the duration of the
		 * connection.
		 */
		if (ct->master)
			state |= OVS_CS_F_RELATED;
		if (keep_nat_flags) {
			state |= key->ct_state & OVS_CS_F_NAT_MASK;
		} else {
			if (ct->status & IPS_SRC_NAT)
				state |= OVS_CS_F_SRC_NAT;
			if (ct->status & IPS_DST_NAT)
				state |= OVS_CS_F_DST_NAT;
		}
		zone = nf_ct_zone(ct);
	} else if (post_ct) {
		state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
		if (info)
			zone = &info->zone;
	}
	__ovs_ct_update_key(key, state, zone, ct);
}

/* This is called to initialize CT key fields possibly coming in from the local
 * stack.
 */
void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
{
	ovs_ct_update_key(skb, NULL, key, false, false);
}

#define IN6_ADDR_INITIALIZER(ADDR) \
	{ (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \
	  (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] }

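/* Dump the conntrack fields of 'output' into the netlink message 'skb'. */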
int ovs_ct_put_key(const struct sw_flow_key *swkey,
		   const struct sw_flow_key *output, struct sk_buff *skb)
{
	if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
		    &output->ct.labels))
		return -EMSGSIZE;

	if (swkey->ct_orig_proto) {
		if (swkey->eth.type == htons(ETH_P_IP)) {
			struct ovs_key_ct_tuple_ipv4 orig = {
				output->ipv4.ct_orig.src,
				output->ipv4.ct_orig.dst,
				output->ct.orig_tp.src,
				output->ct.orig_tp.dst,
				output->ct_orig_proto,
			};
			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
			struct ovs_key_ct_tuple_ipv6 orig = {
				IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src),
				IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst),
				output->ct.orig_tp.src,
				output->ct.orig_tp.dst,
				output->ct_orig_proto,
			};
			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		}
	}

	return 0;
}

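/* Masked write to the connection mark, mirrored into the flow key. */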
static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
			   u32 ct_mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	new_mark = ct_mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
		key->ct.mark = new_mark;
	}

	return 0;
#else
	return -ENOTSUPP;
#endif
}

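/* Return the connection's label extension, adding it if not present yet. */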
static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
	struct nf_conn_labels *cl;

	cl = nf_ct_labels_find(ct);
	if (!cl) {
		nf_ct_labels_ext_add(ct);
		cl = nf_ct_labels_find(ct);
	}

	return cl;
}

/* Initialize labels for a new, yet to be committed conntrack entry.  Note that
 * since the new connection is not yet confirmed, and thus no-one else has
 * access to its labels, we simply write them over.
 */
static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
			      const struct ovs_key_ct_labels *labels,
			      const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl, *master_cl;
	bool have_mask = labels_nonzero(mask);

	/* Inherit master's labels to the related connection? */
	master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;

	if (!master_cl && !have_mask)
		return 0;   /* Nothing to do. */

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	/* Inherit the master's labels, if any. */
	if (master_cl)
		*cl = *master_cl;

	if (have_mask) {
		u32 *dst = (u32 *)cl->bits;
		int i;

		for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
			dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
				(labels->ct_labels_32[i]
				 & mask->ct_labels_32[i]);
	}

	/* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
	 * IPCT_LABEL bit is set in the event cache.
	 */
	nf_conntrack_event_cache(IPCT_LABEL, ct);

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}

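/* Masked write to the labels of an existing connection, mirrored into the
 * flow key.
 */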
static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
			     const struct ovs_key_ct_labels *labels,
			     const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl;
	int err;

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	err = nf_connlabels_replace(ct, labels->ct_labels_32,
				    mask->ct_labels_32,
				    OVS_CT_LABELS_LEN_32);
	if (err)
		return err;

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}

/* 'skb' should already be pulled to nh_ofs. */
static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
{
	const struct nf_conntrack_helper *helper;
	const struct nf_conn_help *help;
	enum ip_conntrack_info ctinfo;
	unsigned int protoff;
	struct nf_conn *ct;
	int err;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
		return NF_ACCEPT;

	help = nfct_help(ct);
	if (!help)
		return NF_ACCEPT;

	helper = rcu_dereference(help->helper);
	if (!helper)
		return NF_ACCEPT;

	switch (proto) {
	case NFPROTO_IPV4:
		protoff = ip_hdrlen(skb);
		break;
	case NFPROTO_IPV6: {
		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
		__be16 frag_off;
		int ofs;

		ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
				       &frag_off);
		if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
			pr_debug("proto header not found\n");
			return NF_ACCEPT;
		}
		protoff = ofs;
		break;
	}
	default:
		WARN_ONCE(1, "helper invoked on non-IP family!");
		return NF_DROP;
	}

	err = helper->help(skb, protoff, ct, ctinfo);
	if (err != NF_ACCEPT)
		return err;

	/* Adjust seqs after helper.  This is needed due to some helpers (e.g.,
	 * FTP with NAT) adjusting the TCP payload size when mangling IP
	 * addresses and/or port numbers in the text-based control connection.
	 */
	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
	    !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
		return NF_DROP;
	return NF_ACCEPT;
}

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
	int err;

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(net, skb, user);
		if (err)
			return err;

		ovs_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err) {
			if (err != -EINPROGRESS)
				kfree_skb(skb);
			return err;
		}

		key->ip.proto = ipv6_hdr(skb)->nexthdr;
		ovs_cb.mru = IP6CB(skb)->frag_max_size;
#endif
	} else {
		kfree_skb(skb);
		return -EPFNOSUPPORT;
	}

	/* The key extracted from the fragment that completed this datagram
	 * likely didn't have an L4 header, so regenerate it.
	 */
	ovs_flow_key_update_l3l4(skb, key);

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_CB(skb) = ovs_cb;

	return 0;
}

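/* Look up an expectation matching the packet's tuple in the given zone. */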
static struct nf_conntrack_expect *
ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
		   u16 proto, const struct sk_buff *skb)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
		return NULL;

	exp = __nf_ct_expect_find(net, zone, &tuple);
	if (exp) {
		struct nf_conntrack_tuple_hash *h;

		/* Delete existing conntrack entry, if it clashes with the
		 * expectation.  This can happen since conntrack ALGs do not
		 * check for clashes between (new) expectations and existing
		 * conntrack entries.  nf_conntrack_in() will check the
		 * expectations only if a conntrack entry can not be found,
		 * which can lead to OVS finding the expectation (here) in the
		 * init direction, but which will not be removed by the
		 * nf_conntrack_in() call, if a matching conntrack entry is
		 * found instead.  In this case all init direction packets
		 * would be reported as new related packets, while reply
		 * direction packets would be reported as un-related
		 * established packets.
		 */
		h = nf_conntrack_find_get(net, zone, &tuple);
		if (h) {
			struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

			nf_ct_delete(ct, 0, 0);
			nf_conntrack_put(&ct->ct_general);
		}
	}

	return exp;
}

/* This replicates logic from nf_conntrack_core.c that is not exported. */
static enum ip_conntrack_info
ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
{
	const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
		return IP_CT_ESTABLISHED_REPLY;
	/* Once we've had two way comms, always ESTABLISHED. */
	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		return IP_CT_ESTABLISHED;
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return IP_CT_RELATED;
	return IP_CT_NEW;
}

/* Find an existing connection which this packet belongs to without
 * re-attributing statistics or modifying the connection state.  This allows an
 * skb->_nfct lost due to an upcall to be recovered during actions execution.
 *
 * Must be called with rcu_read_lock.
 *
 * On success, populates skb->_nfct and returns the connection.  Returns NULL
 * if there is no existing entry.
 */
static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
		     u8 l3num, struct sk_buff *skb, bool natted)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
			       net, &tuple)) {
		pr_debug("ovs_ct_find_existing: Can't get tuple\n");
		return NULL;
	}

	/* Must invert the tuple if skb has been transformed by NAT. */
	if (natted) {
		struct nf_conntrack_tuple inverse;

		if (!nf_ct_invert_tuple(&inverse, &tuple)) {
			pr_debug("ovs_ct_find_existing: Inversion failed!\n");
			return NULL;
		}
		tuple = inverse;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return NULL;   /* Not found. */

	ct = nf_ct_tuplehash_to_ctrack(h);

	/* Inverted packet tuple matches the reverse direction conntrack tuple,
	 * select the other tuplehash to get the right 'ctinfo' bits for this
	 * packet.
	 */
	if (natted)
		h = &ct->tuplehash[!h->tuple.dst.dir];

	nf_ct_set(skb, ct, ovs_ct_get_info(h));
	return ct;
}

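/* Recover an existing connection for a packet whose flow key indicates that
 * conntrack has already been executed, or whose direction is being forced.
 */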
static
struct nf_conn *ovs_ct_executed(struct net *net,
				const struct sw_flow_key *key,
				const struct ovs_conntrack_info *info,
				struct sk_buff *skb,
				bool *ct_executed)
{
	struct nf_conn *ct = NULL;

	/* If no ct, check if we have evidence that an existing conntrack entry
	 * might be found for this skb.  This happens when we lose a skb->_nfct
	 * due to an upcall, or if the direction is being forced.  If the
	 * connection was not confirmed, it is not cached and needs to be run
	 * through conntrack again.
	 */
	*ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
		       !(key->ct_state & OVS_CS_F_INVALID) &&
		       (key->ct_zone == info->zone.id);

	if (*ct_executed || (!key->ct_state && info->force)) {
		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
					  !!(key->ct_state &
					     OVS_CS_F_NAT_MASK));
	}

	return ct;
}

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
			    const struct sw_flow_key *key,
			    const struct ovs_conntrack_info *info,
			    struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	bool ct_executed = true;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		ct = ovs_ct_executed(net, key, info, skb, &ct_executed);

	if (ct)
		nf_ct_get(skb, &ctinfo);
	else
		return false;

	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
		return false;
	if (info->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != info->helper)
			return false;
	}
	if (info->nf_ct_timeout) {
		struct nf_conn_timeout *timeout_ext;

		timeout_ext = nf_ct_timeout_find(ct);
		if (!timeout_ext || info->nf_ct_timeout !=
		    rcu_dereference(timeout_ext->timeout))
			return false;
	}
	/* Force conntrack entry direction to the current packet? */
	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		/* Delete the conntrack entry if confirmed, else just release
		 * the reference.
		 */
		if (nf_ct_is_confirmed(ct))
			nf_ct_delete(ct, 0, 0);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, 0);
		return false;
	}

	return ct_executed;
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			      enum ip_conntrack_info ctinfo,
			      const struct nf_nat_range2 *range,
			      enum nf_nat_manip_type maniptype)
{
	int hooknum, nh_off, err = NF_ACCEPT;

	nh_off = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_off);

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto push;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto push;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		/* fall through */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto push;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto push;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
push:
	skb_push(skb, nh_off);
	skb_postpush_rcsum(skb, skb->data, nh_off);

	return err;
}

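/* Update the flow key to reflect the address and port rewrite done by NAT. */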
static void ovs_nat_update_key(struct sw_flow_key *key,
			       const struct sk_buff *skb,
			       enum nf_nat_manip_type maniptype)
{
	if (maniptype == NF_NAT_MANIP_SRC) {
		__be16 src;

		key->ct_state |= OVS_CS_F_SRC_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.src = ip_hdr(skb)->saddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
			       sizeof(key->ipv6.addr.src));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			src = udp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_TCP)
			src = tcp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_SCTP)
			src = sctp_hdr(skb)->source;
		else
			return;

		key->tp.src = src;
	} else {
		__be16 dst;

		key->ct_state |= OVS_CS_F_DST_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.dst = ip_hdr(skb)->daddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(key->ipv6.addr.dst));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			dst = udp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_TCP)
			dst = tcp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_SCTP)
			dst = sctp_hdr(skb)->dest;
		else
			return;

		key->tp.dst = dst;
	}
}

/* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	enum nf_nat_manip_type maniptype;
	int err;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_ACCEPT;   /* Can't NAT. */

	/* Determine NAT type.
	 * Check if the NAT type can be deduced from the tracked connection.
	 * Make sure new expected connections (IP_CT_RELATED) are NATted only
	 * when committing.
	 */
	if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
	    ct->status & IPS_NAT_MASK &&
	    (ctinfo != IP_CT_RELATED || info->commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (info->nat & OVS_CT_SRC_NAT) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (info->nat & OVS_CT_DST_NAT) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT; /* Connection is not NATed. */
	}
	err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);

	/* Mark NAT done if successful and update the flow key. */
	if (err == NF_ACCEPT)
		ovs_nat_update_key(key, skb, maniptype);

	return err;
}
#else /* !CONFIG_NF_NAT */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	return NF_ACCEPT;
}
#endif

/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
 * not done already.  Update key with new CT state after passing the packet
 * through conntrack.
 * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
 * set to NULL and 0 will be returned.
 */
static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			   const struct ovs_conntrack_info *info,
			   struct sk_buff *skb)
{
	/* If we are recirculating packets to match on conntrack fields and
	 * committing with a separate conntrack action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	bool cached = skb_nfct_cached(net, key, info, skb);
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	if (!cached) {
		struct nf_hook_state state = {
			.hook = NF_INET_PRE_ROUTING,
			.pf = info->family,
			.net = net,
		};
		struct nf_conn *tmpl = info->ct;
		int err;

		/* Associate skb with specified zone. */
		if (tmpl) {
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			return -ENOENT;

		/* Clear CT state NAT flags to mark that we have not yet done
		 * NAT after the nf_conntrack_in() call.  We can actually clear
		 * the whole state, as it will be re-initialized below.
		 */
		key->ct_state = 0;

		/* Update the key, but keep the NAT flags. */
		ovs_ct_update_key(skb, info, key, true, true);
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		/* Packets starting a new connection must be NATted before the
		 * helper, so that the helper knows about the NAT.  We enforce
		 * this by delaying both NAT and helper calls for unconfirmed
		 * connections until the committing CT action.  For later
		 * packets NAT and Helper may be called in either order.
		 *
		 * NAT will be done only if the CT action has NAT, and only
		 * once per packet (per zone), as guarded by the NAT bits in
		 * the key->ct_state.
		 */
		if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
		    (nf_ct_is_confirmed(ct) || info->commit) &&
		    ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
			return -EINVAL;
		}

		/* Userspace may decide to perform a ct lookup without a helper
		 * specified followed by a (recirculate and) commit with one.
		 * Therefore, for unconfirmed connections which we will commit,
		 * we need to attach the helper here.
		 */
		if (!nf_ct_is_confirmed(ct) && info->commit &&
		    info->helper && !nfct_help(ct)) {
			int err = __nf_ct_try_assign_helper(ct, info->ct,
							    GFP_ATOMIC);
			if (err)
				return err;

			/* helper installed, add seqadj if NAT is required */
			if (info->nat && !nfct_seqadj(ct)) {
				if (!nfct_seqadj_ext_add(ct))
					return -EINVAL;
			}
		}

		/* Call the helper only if:
		 * - nf_conntrack_in() was executed above ("!cached") for a
		 *   confirmed connection, or
		 * - When committing an unconfirmed connection.
		 */
		if ((nf_ct_is_confirmed(ct) ? !cached : info->commit) &&
		    ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
			return -EINVAL;
		}
	}

	return 0;
}

/* Lookup connection and read fields into key. */
static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	struct nf_conntrack_expect *exp;

	/* If we pass an expected packet through nf_conntrack_in() the
	 * expectation is typically removed, but the packet could still be
	 * lost in upcall processing.  To prevent this from happening we
	 * perform an explicit expectation lookup.  Expected connections are
	 * always new, and will be passed through conntrack only when they are
	 * committed, as it is OK to remove the expectation at that time.
	 */
	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
	if (exp) {
		u8 state;

		/* NOTE: New connections are NATted and Helped only when
		 * committed, so we are not calling into NAT here.
		 */
		state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
		__ovs_ct_update_key(key, state, &info->zone, exp->master);
	} else {
		struct nf_conn *ct;
		int err;

		err = __ovs_ct_lookup(net, key, info, skb);
		if (err)
			return err;

		ct = (struct nf_conn *)skb_nfct(skb);
		if (ct)
			nf_ct_deliver_cached_events(ct);
	}

	return 0;
}

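/* Returns true if any bit in 'labels' is set. */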
static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
{
	size_t i;

	for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
		if (labels->ct_labels_32[i])
			return true;

	return false;
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
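/* Map a zone id to its bucket in the per-zone limit hash table. */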
static struct hlist_head *ct_limit_hash_bucket(
	const struct ovs_ct_limit_info *info, u16 zone)
{
	return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
}

/* Call with ovs_mutex */
static void ct_limit_set(const struct ovs_ct_limit_info *info,
			 struct ovs_ct_limit *new_ct_limit)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, new_ct_limit->zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == new_ct_limit->zone) {
			hlist_replace_rcu(&ct_limit->hlist_node,
					  &new_ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}

	hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
}

/* Call with ovs_mutex */
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	struct hlist_node *n;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
		if (ct_limit->zone == zone) {
			hlist_del_rcu(&ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}
}

/* Call with RCU read lock */
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == zone)
			return ct_limit->limit;
	}

	return info->default_limit;
}

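/* Enforce the per-zone connection limit; returns -ENOMEM if the zone's limit
 * is exceeded.
 */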
static int ovs_ct_check_limit(struct net *net,
			      const struct ovs_conntrack_info *info,
			      const struct nf_conntrack_tuple *tuple)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	u32 per_zone_limit, connections;
	u32 conncount_key;

	conncount_key = info->zone.id;

	per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
	if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
		return 0;

	connections = nf_conncount_count(net, ct_limit_info->data,
					 &conncount_key, tuple, &info->zone);
	if (connections > per_zone_limit)
		return -ENOMEM;

	return 0;
}
#endif

/* Lookup connection and confirm if unconfirmed. */
static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err;

	err = __ovs_ct_lookup(net, key, info, skb);
	if (err)
		return err;

	/* The connection could be invalid, in which case this is a no-op. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 0;

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
		if (!nf_ct_is_confirmed(ct)) {
			err = ovs_ct_check_limit(net, info,
				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
			if (err) {
				net_warn_ratelimited("openvswitch: zone: %u "
					"exceeds conntrack limit\n",
					info->zone.id);
				return err;
			}
		}
	}
#endif

	/* Set the conntrack event mask if given.  NEW and DELETE events have
	 * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
	 * typically would receive many kinds of updates.  Setting the event
	 * mask allows those events to be filtered.  The set event mask will
	 * remain in effect for the lifetime of the connection unless changed
	 * by a further CT action with both the commit flag and the eventmask
	 * option.
	 */
	if (info->have_eventmask) {
		struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);

		if (cache)
			cache->ctmask = info->eventmask;
	}

	/* Apply changes before confirming the connection so that the initial
	 * conntrack NEW netlink event carries the values given in the CT
	 * action.
	 */
	if (info->mark.mask) {
		err = ovs_ct_set_mark(ct, key, info->mark.value,
				      info->mark.mask);
		if (err)
			return err;
	}
	if (!nf_ct_is_confirmed(ct)) {
		err = ovs_ct_init_labels(ct, key, &info->labels.value,
					 &info->labels.mask);
		if (err)
			return err;
	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
		   labels_nonzero(&info->labels.mask)) {
		err = ovs_ct_set_labels(ct, key, &info->labels.value,
					&info->labels.mask);
		if (err)
			return err;
	}
	/* This will take care of sending queued events even if the connection
	 * is already confirmed.
	 */
	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
		return -EINVAL;

	return 0;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int ovs_skb_network_trim(struct sk_buff *skb)
{
	unsigned int len;
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case htons(ETH_P_IPV6):
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);
	if (err)
		kfree_skb(skb);

	return err;
}

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
		   struct sw_flow_key *key,
		   const struct ovs_conntrack_info *info)
{
	int nh_ofs;
	int err;

	/* The conntrack module expects to be working at L3. */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);

	err = ovs_skb_network_trim(skb);
	if (err)
		return err;

	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
		err = handle_fragments(net, key, info->zone.id, skb);
		if (err)
			return err;
	}

	if (info->commit)
		err = ovs_ct_commit(net, key, info, skb);
	else
		err = ovs_ct_lookup(net, key, info, skb);

	skb_push(skb, nh_ofs);
	skb_postpush_rcsum(skb, skb->data, nh_ofs);
	if (err)
		kfree_skb(skb);
	return err;
}

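/* Drop any conntrack state from 'skb' and reset the conntrack key fields. */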
int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
	if (skb_nfct(skb)) {
		nf_conntrack_put(skb_nfct(skb));
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		ovs_ct_fill_key(skb, key);
	}

	return 0;
}

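/* Attach the conntrack helper named 'name' (and, when NAT is in use, its NAT
 * helper module) to the conntrack template in 'info'.
 */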
static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
			     const struct sw_flow_key *key, bool log)
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help;
	int ret = 0;

	helper = nf_conntrack_helper_try_module_get(name, info->family,
						    key->ip.proto);
	if (!helper) {
		OVS_NLERR(log, "Unknown helper \"%s\"", name);
		return -EINVAL;
	}

	help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL);
	if (!help) {
		nf_conntrack_helper_put(helper);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (info->nat) {
		ret = nf_nat_helper_try_module_get(name, info->family,
						   key->ip.proto);
		if (ret) {
			nf_conntrack_helper_put(helper);
			OVS_NLERR(log, "Failed to load \"%s\" NAT helper, error: %d",
				  name, ret);
			return ret;
		}
	}
#endif
	rcu_assign_pointer(help->helper, helper);
	info->helper = helper;
	return ret;
}

#if IS_ENABLED(CONFIG_NF_NAT)
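/* Parse the nested OVS_CT_ATTR_NAT attributes into 'info'. */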
Jarno Rajahalme05752522016-03-10 10:54:23 -08001349static int parse_nat(const struct nlattr *attr,
1350 struct ovs_conntrack_info *info, bool log)
1351{
1352 struct nlattr *a;
1353 int rem;
1354 bool have_ip_max = false;
1355 bool have_proto_max = false;
1356 bool ip_vers = (info->family == NFPROTO_IPV6);
1357
1358 nla_for_each_nested(a, attr, rem) {
1359 static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
1360 [OVS_NAT_ATTR_SRC] = {0, 0},
1361 [OVS_NAT_ATTR_DST] = {0, 0},
1362 [OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
1363 sizeof(struct in6_addr)},
1364 [OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
1365 sizeof(struct in6_addr)},
1366 [OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
1367 [OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
1368 [OVS_NAT_ATTR_PERSISTENT] = {0, 0},
1369 [OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
1370 [OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
1371 };
1372 int type = nla_type(a);
1373
1374 if (type > OVS_NAT_ATTR_MAX) {
Joe Perches0ed80da2017-08-11 04:26:26 -07001375 OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
Jarno Rajahalme05752522016-03-10 10:54:23 -08001376 type, OVS_NAT_ATTR_MAX);
1377 return -EINVAL;
1378 }
1379
1380 if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
Joe Perches0ed80da2017-08-11 04:26:26 -07001381 OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
Jarno Rajahalme05752522016-03-10 10:54:23 -08001382 type, nla_len(a),
1383 ovs_nat_attr_lens[type][ip_vers]);
1384 return -EINVAL;
1385 }
1386
1387 switch (type) {
1388 case OVS_NAT_ATTR_SRC:
1389 case OVS_NAT_ATTR_DST:
1390 if (info->nat) {
Joe Perches0ed80da2017-08-11 04:26:26 -07001391 OVS_NLERR(log, "Only one type of NAT may be specified");
Jarno Rajahalme05752522016-03-10 10:54:23 -08001392 return -ERANGE;
1393 }
1394 info->nat |= OVS_CT_NAT;
1395 info->nat |= ((type == OVS_NAT_ATTR_SRC)
1396 ? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
1397 break;
1398
1399 case OVS_NAT_ATTR_IP_MIN:
Haishuang Yanac71b462016-03-28 18:08:59 +08001400 nla_memcpy(&info->range.min_addr, a,
1401 sizeof(info->range.min_addr));
Jarno Rajahalme05752522016-03-10 10:54:23 -08001402 info->range.flags |= NF_NAT_RANGE_MAP_IPS;
1403 break;
1404
1405 case OVS_NAT_ATTR_IP_MAX:
1406 have_ip_max = true;
1407 nla_memcpy(&info->range.max_addr, a,
1408 sizeof(info->range.max_addr));
1409 info->range.flags |= NF_NAT_RANGE_MAP_IPS;
1410 break;
1411
1412 case OVS_NAT_ATTR_PROTO_MIN:
1413 info->range.min_proto.all = htons(nla_get_u16(a));
1414 info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
1415 break;
1416
1417 case OVS_NAT_ATTR_PROTO_MAX:
1418 have_proto_max = true;
1419 info->range.max_proto.all = htons(nla_get_u16(a));
1420 info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
1421 break;
1422
1423 case OVS_NAT_ATTR_PERSISTENT:
1424 info->range.flags |= NF_NAT_RANGE_PERSISTENT;
1425 break;
1426
1427 case OVS_NAT_ATTR_PROTO_HASH:
1428 info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
1429 break;
1430
1431 case OVS_NAT_ATTR_PROTO_RANDOM:
1432 info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
1433 break;
1434
1435 default:
Joe Perches0ed80da2017-08-11 04:26:26 -07001436 OVS_NLERR(log, "Unknown NAT attribute (%d)", type);
Jarno Rajahalme05752522016-03-10 10:54:23 -08001437 return -EINVAL;
1438 }
1439 }
1440
1441 if (rem > 0) {
Joe Perches0ed80da2017-08-11 04:26:26 -07001442 OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
Jarno Rajahalme05752522016-03-10 10:54:23 -08001443 return -EINVAL;
1444 }
1445 if (!info->nat) {
1446 /* Do not allow flags if no type is given. */
1447 if (info->range.flags) {
1448 OVS_NLERR(log,
Julia Lawalle0b10842017-12-27 15:51:38 +01001449 "NAT flags may be given only when NAT range (SRC or DST) is also specified."
Jarno Rajahalme05752522016-03-10 10:54:23 -08001450 );
1451 return -EINVAL;
1452 }
1453 info->nat = OVS_CT_NAT; /* NAT existing connections. */
1454 } else if (!info->commit) {
1455 OVS_NLERR(log,
Julia Lawalle0b10842017-12-27 15:51:38 +01001456 "NAT attributes may be specified only when CT COMMIT flag is also specified."
Jarno Rajahalme05752522016-03-10 10:54:23 -08001457 );
1458 return -EINVAL;
1459 }
1460 /* Allow missing IP_MAX. */
1461 if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
1462 memcpy(&info->range.max_addr, &info->range.min_addr,
1463 sizeof(info->range.max_addr));
1464 }
1465 /* Allow missing PROTO_MAX. */
1466 if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
1467 !have_proto_max) {
1468 info->range.max_proto.all = info->range.min_proto.all;
1469 }
1470 return 0;
1471}
1472#endif
1473
Joe Stringer7f8a4362015-08-26 11:31:48 -07001474static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
Joe Stringerab38a7b2015-10-06 11:00:01 -07001475 [OVS_CT_ATTR_COMMIT] = { .minlen = 0, .maxlen = 0 },
Jarno Rajahalmedd41d332017-02-09 11:22:00 -08001476 [OVS_CT_ATTR_FORCE_COMMIT] = { .minlen = 0, .maxlen = 0 },
Joe Stringer7f8a4362015-08-26 11:31:48 -07001477 [OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16),
1478 .maxlen = sizeof(u16) },
Joe Stringer182e3042015-08-26 11:31:49 -07001479 [OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark),
1480 .maxlen = sizeof(struct md_mark) },
Joe Stringer33db4122015-10-01 15:00:37 -07001481 [OVS_CT_ATTR_LABELS] = { .minlen = sizeof(struct md_labels),
1482 .maxlen = sizeof(struct md_labels) },
Joe Stringercae3a262015-08-26 11:31:53 -07001483 [OVS_CT_ATTR_HELPER] = { .minlen = 1,
Jarno Rajahalme05752522016-03-10 10:54:23 -08001484 .maxlen = NF_CT_HELPER_NAME_LEN },
Florian Westphal4806e972019-03-27 09:22:26 +01001485#if IS_ENABLED(CONFIG_NF_NAT)
Jarno Rajahalme05752522016-03-10 10:54:23 -08001486 /* NAT length is checked when parsing the nested attributes. */
1487 [OVS_CT_ATTR_NAT] = { .minlen = 0, .maxlen = INT_MAX },
1488#endif
Jarno Rajahalme12064552017-04-21 16:48:06 -07001489 [OVS_CT_ATTR_EVENTMASK] = { .minlen = sizeof(u32),
1490 .maxlen = sizeof(u32) },
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001491 [OVS_CT_ATTR_TIMEOUT] = { .minlen = 1,
1492 .maxlen = CTNL_TIMEOUT_NAME_MAX },
Joe Stringer7f8a4362015-08-26 11:31:48 -07001493};
1494
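/* Parse the nested OVS_ACTION_ATTR_CT attributes into @info.  Each attribute
 * is length-checked against ovs_ct_attr_lens; the helper name, if any, is
 * returned via @helper and attached later by ovs_ct_add_helper() once the
 * conntrack template exists.  Setting ct_mark or ct_labels without the
 * commit flag is rejected.
 */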
1495static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
Joe Stringercae3a262015-08-26 11:31:53 -07001496 const char **helper, bool log)
Joe Stringer7f8a4362015-08-26 11:31:48 -07001497{
1498 struct nlattr *a;
1499 int rem;
1500
1501 nla_for_each_nested(a, attr, rem) {
1502 int type = nla_type(a);
Liping Zhang69ec9322017-07-23 17:52:23 +08001503 int maxlen;
1504 int minlen;
Joe Stringer7f8a4362015-08-26 11:31:48 -07001505
1506 if (type > OVS_CT_ATTR_MAX) {
1507 OVS_NLERR(log,
1508 "Unknown conntrack attr (type=%d, max=%d)",
1509 type, OVS_CT_ATTR_MAX);
1510 return -EINVAL;
1511 }
Liping Zhang69ec9322017-07-23 17:52:23 +08001512
1513 maxlen = ovs_ct_attr_lens[type].maxlen;
1514 minlen = ovs_ct_attr_lens[type].minlen;
Joe Stringer7f8a4362015-08-26 11:31:48 -07001515 if (nla_len(a) < minlen || nla_len(a) > maxlen) {
1516 OVS_NLERR(log,
1517 "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
1518 type, nla_len(a), maxlen);
1519 return -EINVAL;
1520 }
1521
1522 switch (type) {
Jarno Rajahalmedd41d332017-02-09 11:22:00 -08001523 case OVS_CT_ATTR_FORCE_COMMIT:
1524 info->force = true;
1525 /* fall through. */
Joe Stringerab38a7b2015-10-06 11:00:01 -07001526 case OVS_CT_ATTR_COMMIT:
1527 info->commit = true;
Joe Stringer7f8a4362015-08-26 11:31:48 -07001528 break;
1529#ifdef CONFIG_NF_CONNTRACK_ZONES
1530 case OVS_CT_ATTR_ZONE:
1531 info->zone.id = nla_get_u16(a);
1532 break;
1533#endif
Joe Stringer182e3042015-08-26 11:31:49 -07001534#ifdef CONFIG_NF_CONNTRACK_MARK
1535 case OVS_CT_ATTR_MARK: {
1536 struct md_mark *mark = nla_data(a);
1537
Joe Stringere754ec62015-10-19 19:19:00 -07001538 if (!mark->mask) {
1539 OVS_NLERR(log, "ct_mark mask cannot be 0");
1540 return -EINVAL;
1541 }
Joe Stringer182e3042015-08-26 11:31:49 -07001542 info->mark = *mark;
1543 break;
1544 }
1545#endif
Joe Stringerc2ac6672015-08-26 11:31:52 -07001546#ifdef CONFIG_NF_CONNTRACK_LABELS
Joe Stringer33db4122015-10-01 15:00:37 -07001547 case OVS_CT_ATTR_LABELS: {
1548 struct md_labels *labels = nla_data(a);
Joe Stringerc2ac6672015-08-26 11:31:52 -07001549
Joe Stringere754ec62015-10-19 19:19:00 -07001550 if (!labels_nonzero(&labels->mask)) {
1551 OVS_NLERR(log, "ct_labels mask cannot be 0");
1552 return -EINVAL;
1553 }
Joe Stringer33db4122015-10-01 15:00:37 -07001554 info->labels = *labels;
Joe Stringerc2ac6672015-08-26 11:31:52 -07001555 break;
1556 }
1557#endif
Joe Stringercae3a262015-08-26 11:31:53 -07001558 case OVS_CT_ATTR_HELPER:
1559 *helper = nla_data(a);
1560 if (!memchr(*helper, '\0', nla_len(a))) {
1561 OVS_NLERR(log, "Invalid conntrack helper");
1562 return -EINVAL;
1563 }
1564 break;
Florian Westphal4806e972019-03-27 09:22:26 +01001565#if IS_ENABLED(CONFIG_NF_NAT)
Jarno Rajahalme05752522016-03-10 10:54:23 -08001566 case OVS_CT_ATTR_NAT: {
1567 int err = parse_nat(a, info, log);
1568
1569 if (err)
1570 return err;
1571 break;
1572 }
1573#endif
Jarno Rajahalme12064552017-04-21 16:48:06 -07001574 case OVS_CT_ATTR_EVENTMASK:
1575 info->have_eventmask = true;
1576 info->eventmask = nla_get_u32(a);
1577 break;
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001578#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1579 case OVS_CT_ATTR_TIMEOUT:
1580 memcpy(info->timeout, nla_data(a), nla_len(a));
1581 if (!memchr(info->timeout, '\0', nla_len(a))) {
Yi-Hung Wei12c6bc32019-08-21 17:16:10 -07001582 OVS_NLERR(log, "Invalid conntrack timeout");
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001583 return -EINVAL;
1584 }
1585 break;
1586#endif
Jarno Rajahalme12064552017-04-21 16:48:06 -07001587
Joe Stringer7f8a4362015-08-26 11:31:48 -07001588 default:
1589 OVS_NLERR(log, "Unknown conntrack attr (%d)",
1590 type);
1591 return -EINVAL;
1592 }
1593 }
1594
Jarno Rajahalme7d904c72016-06-21 14:59:38 -07001595#ifdef CONFIG_NF_CONNTRACK_MARK
1596 if (!info->commit && info->mark.mask) {
1597 OVS_NLERR(log,
1598 "Setting conntrack mark requires 'commit' flag.");
1599 return -EINVAL;
1600 }
1601#endif
1602#ifdef CONFIG_NF_CONNTRACK_LABELS
1603 if (!info->commit && labels_nonzero(&info->labels.mask)) {
1604 OVS_NLERR(log,
1605 "Setting conntrack labels requires 'commit' flag.");
1606 return -EINVAL;
1607 }
1608#endif
Joe Stringer7f8a4362015-08-26 11:31:48 -07001609 if (rem > 0) {
1610 OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
1611 return -EINVAL;
1612 }
1613
1614 return 0;
1615}
1616
Joe Stringerc2ac6672015-08-26 11:31:52 -07001617bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
Joe Stringer7f8a4362015-08-26 11:31:48 -07001618{
1619 if (attr == OVS_KEY_ATTR_CT_STATE)
1620 return true;
1621 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
1622 attr == OVS_KEY_ATTR_CT_ZONE)
1623 return true;
Joe Stringer182e3042015-08-26 11:31:49 -07001624 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
1625 attr == OVS_KEY_ATTR_CT_MARK)
1626 return true;
Joe Stringerc2ac6672015-08-26 11:31:52 -07001627 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
Joe Stringer33db4122015-10-01 15:00:37 -07001628 attr == OVS_KEY_ATTR_CT_LABELS) {
Joe Stringerc2ac6672015-08-26 11:31:52 -07001629 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
1630
1631 return ovs_net->xt_label;
1632 }
Joe Stringer7f8a4362015-08-26 11:31:48 -07001633
1634 return false;
1635}
1636
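/* Validate an OVS_ACTION_ATTR_CT action from userspace and copy it into the
 * flow's action list.  A conntrack template is allocated for the requested
 * zone, the optional timeout policy and helper are attached to it, and the
 * finished ovs_conntrack_info is appended via ovs_nla_add_action().
 */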
1637int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
1638 const struct sw_flow_key *key,
1639 struct sw_flow_actions **sfa, bool log)
1640{
1641 struct ovs_conntrack_info ct_info;
Joe Stringercae3a262015-08-26 11:31:53 -07001642 const char *helper = NULL;
Joe Stringer7f8a4362015-08-26 11:31:48 -07001643 u16 family;
1644 int err;
1645
1646 family = key_to_nfproto(key);
1647 if (family == NFPROTO_UNSPEC) {
1648 OVS_NLERR(log, "ct family unspecified");
1649 return -EINVAL;
1650 }
1651
1652 memset(&ct_info, 0, sizeof(ct_info));
1653 ct_info.family = family;
1654
1655 nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
1656 NF_CT_DEFAULT_ZONE_DIR, 0);
1657
Joe Stringercae3a262015-08-26 11:31:53 -07001658 err = parse_ct(attr, &ct_info, &helper, log);
Joe Stringer7f8a4362015-08-26 11:31:48 -07001659 if (err)
1660 return err;
1661
1662 /* Set up template for tracking connections in specific zones. */
1663 ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
1664 if (!ct_info.ct) {
1665 OVS_NLERR(log, "Failed to allocate conntrack template");
1666 return -ENOMEM;
1667 }
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001668
1669 if (ct_info.timeout[0]) {
1670 if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
1671 ct_info.timeout))
1672 pr_info_ratelimited("Failed to associate timeout "
1673 "policy `%s'\n", ct_info.timeout);
Yi-Hung Wei71778952019-08-22 13:17:50 -07001674 else
1675 ct_info.nf_ct_timeout = rcu_dereference(
1676 nf_ct_timeout_find(ct_info.ct)->timeout);
1677
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001678 }
1679
Joe Stringercae3a262015-08-26 11:31:53 -07001680 if (helper) {
1681 err = ovs_ct_add_helper(&ct_info, helper, key, log);
1682 if (err)
1683 goto err_free_ct;
1684 }
Joe Stringer7f8a4362015-08-26 11:31:48 -07001685
1686 err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
1687 sizeof(ct_info), log);
1688 if (err)
1689 goto err_free_ct;
1690
Flavio Leitner7f6d6552018-09-28 14:55:34 -03001691 __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
1692 nf_conntrack_get(&ct_info.ct->ct_general);
Joe Stringer7f8a4362015-08-26 11:31:48 -07001693 return 0;
1694err_free_ct:
Joe Stringer2f3ab9f2015-12-09 14:07:39 -08001695 __ovs_ct_free_action(&ct_info);
Joe Stringer7f8a4362015-08-26 11:31:48 -07001696 return err;
1697}
1698
Florian Westphal4806e972019-03-27 09:22:26 +01001699#if IS_ENABLED(CONFIG_NF_NAT)
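/* Serialize the NAT configuration back into a nested OVS_CT_ATTR_NAT
 * attribute when the action is dumped to userspace.  Maximum values are
 * emitted only when they differ from the minimum, mirroring the defaults
 * applied by parse_nat().
 */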
Jarno Rajahalme05752522016-03-10 10:54:23 -08001700static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
1701 struct sk_buff *skb)
1702{
1703 struct nlattr *start;
1704
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001705 start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
Jarno Rajahalme05752522016-03-10 10:54:23 -08001706 if (!start)
1707 return false;
1708
1709 if (info->nat & OVS_CT_SRC_NAT) {
1710 if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
1711 return false;
1712 } else if (info->nat & OVS_CT_DST_NAT) {
1713 if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
1714 return false;
1715 } else {
1716 goto out;
1717 }
1718
1719 if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
Florian Westphal3bf195a2019-02-19 17:38:21 +01001720 if (IS_ENABLED(CONFIG_NF_NAT) &&
Arnd Bergmann99b72482016-03-18 14:33:45 +01001721 info->family == NFPROTO_IPV4) {
Jarno Rajahalme05752522016-03-10 10:54:23 -08001722 if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
1723 info->range.min_addr.ip) ||
1724 (info->range.max_addr.ip
1725 != info->range.min_addr.ip &&
1726 (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
1727 info->range.max_addr.ip))))
1728 return false;
Florian Westphal3bf195a2019-02-19 17:38:21 +01001729 } else if (IS_ENABLED(CONFIG_IPV6) &&
Arnd Bergmann99b72482016-03-18 14:33:45 +01001730 info->family == NFPROTO_IPV6) {
Jarno Rajahalme05752522016-03-10 10:54:23 -08001731 if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
1732 &info->range.min_addr.in6) ||
1733 (memcmp(&info->range.max_addr.in6,
1734 &info->range.min_addr.in6,
1735 sizeof(info->range.max_addr.in6)) &&
1736 (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
1737 &info->range.max_addr.in6))))
1738 return false;
Jarno Rajahalme05752522016-03-10 10:54:23 -08001739 } else {
1740 return false;
1741 }
1742 }
1743 if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
1744 (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
1745 ntohs(info->range.min_proto.all)) ||
1746 (info->range.max_proto.all != info->range.min_proto.all &&
1747 nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
1748 ntohs(info->range.max_proto.all)))))
1749 return false;
1750
1751 if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
1752 nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
1753 return false;
1754 if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
1755 nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
1756 return false;
1757 if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
1758 nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
1759 return false;
1760out:
1761 nla_nest_end(skb, start);
1762
1763 return true;
1764}
1765#endif
1766
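/* Dump a previously validated ct action back to userspace as a nested
 * OVS_ACTION_ATTR_CT attribute, emitting only the pieces that were
 * configured (commit/force, zone, mark, labels, helper, eventmask,
 * timeout and NAT).
 */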
Joe Stringer7f8a4362015-08-26 11:31:48 -07001767int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
1768 struct sk_buff *skb)
1769{
1770 struct nlattr *start;
1771
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001772 start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
Joe Stringer7f8a4362015-08-26 11:31:48 -07001773 if (!start)
1774 return -EMSGSIZE;
1775
Jarno Rajahalmedd41d332017-02-09 11:22:00 -08001776 if (ct_info->commit && nla_put_flag(skb, ct_info->force
1777 ? OVS_CT_ATTR_FORCE_COMMIT
1778 : OVS_CT_ATTR_COMMIT))
Joe Stringer7f8a4362015-08-26 11:31:48 -07001779 return -EMSGSIZE;
1780 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
1781 nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
1782 return -EMSGSIZE;
Joe Stringere754ec62015-10-19 19:19:00 -07001783 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
Joe Stringer182e3042015-08-26 11:31:49 -07001784 nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
1785 &ct_info->mark))
1786 return -EMSGSIZE;
Joe Stringerc2ac6672015-08-26 11:31:52 -07001787 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
Joe Stringere754ec62015-10-19 19:19:00 -07001788 labels_nonzero(&ct_info->labels.mask) &&
Joe Stringer33db4122015-10-01 15:00:37 -07001789 nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
1790 &ct_info->labels))
Joe Stringerc2ac6672015-08-26 11:31:52 -07001791 return -EMSGSIZE;
Joe Stringercae3a262015-08-26 11:31:53 -07001792 if (ct_info->helper) {
1793 if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
1794 ct_info->helper->name))
1795 return -EMSGSIZE;
1796 }
Jarno Rajahalme12064552017-04-21 16:48:06 -07001797 if (ct_info->have_eventmask &&
1798 nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
1799 return -EMSGSIZE;
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001800 if (ct_info->timeout[0]) {
1801 if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
1802 return -EMSGSIZE;
1803 }
Jarno Rajahalme12064552017-04-21 16:48:06 -07001804
Florian Westphal4806e972019-03-27 09:22:26 +01001805#if IS_ENABLED(CONFIG_NF_NAT)
Jarno Rajahalme05752522016-03-10 10:54:23 -08001806 if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
1807 return -EMSGSIZE;
1808#endif
Joe Stringer7f8a4362015-08-26 11:31:48 -07001809 nla_nest_end(skb, start);
1810
1811 return 0;
1812}
1813
1814void ovs_ct_free_action(const struct nlattr *a)
1815{
1816 struct ovs_conntrack_info *ct_info = nla_data(a);
1817
Joe Stringer2f3ab9f2015-12-09 14:07:39 -08001818 __ovs_ct_free_action(ct_info);
1819}
1820
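/* Drop the references taken while building the action: the NAT and
 * conntrack helper modules, the timeout policy and the conntrack
 * template itself.
 */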
1821static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
1822{
Flavio Leitnerfec9c272019-04-17 11:46:17 -03001823 if (ct_info->helper) {
Geert Uytterhoevenf319ca62019-05-08 08:52:32 +02001824#if IS_ENABLED(CONFIG_NF_NAT)
Flavio Leitnerfec9c272019-04-17 11:46:17 -03001825 if (ct_info->nat)
1826 nf_nat_helper_put(ct_info->helper);
1827#endif
Liping Zhangd91fc592017-05-07 22:01:55 +08001828 nf_conntrack_helper_put(ct_info->helper);
Flavio Leitnerfec9c272019-04-17 11:46:17 -03001829 }
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001830 if (ct_info->ct) {
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001831 if (ct_info->timeout[0])
1832 nf_ct_destroy_timeout(ct_info->ct);
Dan Carpenter6d670492019-04-02 09:53:14 +03001833 nf_ct_tmpl_free(ct_info->ct);
Yi-Hung Wei06bd2bd2019-03-26 11:31:14 -07001834 }
Joe Stringer7f8a4362015-08-26 11:31:48 -07001835}
Joe Stringerc2ac6672015-08-26 11:31:52 -07001836
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07001837#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
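/* Per-netns setup for the OVS_CT_LIMIT netlink family: allocate the
 * ct_limit_info, its per-zone hash table and the nf_conncount map used to
 * count connections per zone.
 */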
1838static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
1839{
1840 int i, err;
1841
1842 ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
1843 GFP_KERNEL);
1844 if (!ovs_net->ct_limit_info)
1845 return -ENOMEM;
1846
1847 ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
1848 ovs_net->ct_limit_info->limits =
1849 kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
1850 GFP_KERNEL);
1851 if (!ovs_net->ct_limit_info->limits) {
1852 kfree(ovs_net->ct_limit_info);
1853 return -ENOMEM;
1854 }
1855
1856 for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
1857 INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);
1858
1859 ovs_net->ct_limit_info->data =
1860 nf_conncount_init(net, NFPROTO_INET, sizeof(u32));
1861
1862 if (IS_ERR(ovs_net->ct_limit_info->data)) {
1863 err = PTR_ERR(ovs_net->ct_limit_info->data);
1864 kfree(ovs_net->ct_limit_info->limits);
1865 kfree(ovs_net->ct_limit_info);
1866 pr_err("openvswitch: failed to init nf_conncount %d\n", err);
1867 return err;
1868 }
1869 return 0;
1870}
1871
1872static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
1873{
1874 const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
1875 int i;
1876
1877 nf_conncount_destroy(net, NFPROTO_INET, info->data);
1878 for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
1879 struct hlist_head *head = &info->limits[i];
1880 struct ovs_ct_limit *ct_limit;
1881
1882 hlist_for_each_entry_rcu(ct_limit, head, hlist_node)
1883 kfree_rcu(ct_limit, rcu);
1884 }
1885 kfree(ovs_net->ct_limit_info->limits);
1886 kfree(ovs_net->ct_limit_info);
1887}
1888
1889static struct sk_buff *
1890ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
1891 struct ovs_header **ovs_reply_header)
1892{
1893 struct ovs_header *ovs_header = info->userhdr;
1894 struct sk_buff *skb;
1895
1896 skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1897 if (!skb)
1898 return ERR_PTR(-ENOMEM);
1899
1900 *ovs_reply_header = genlmsg_put(skb, info->snd_portid,
1901 info->snd_seq,
1902 &dp_ct_limit_genl_family, 0, cmd);
1903
1904 if (!*ovs_reply_header) {
1905 nlmsg_free(skb);
1906 return ERR_PTR(-EMSGSIZE);
1907 }
1908 (*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;
1909
1910 return skb;
1911}
1912
1913static bool check_zone_id(int zone_id, u16 *pzone)
1914{
1915 if (zone_id >= 0 && zone_id <= 65535) {
1916 *pzone = (u16)zone_id;
1917 return true;
1918 }
1919 return false;
1920}
1921
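/* Walk the packed array of struct ovs_zone_limit entries carried in
 * OVS_CT_LIMIT_ATTR_ZONE_LIMIT and install each limit, updating the default
 * limit when the reserved OVS_ZONE_LIMIT_DEFAULT_ZONE id is given.
 */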
1922static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
1923 struct ovs_ct_limit_info *info)
1924{
1925 struct ovs_zone_limit *zone_limit;
1926 int rem;
1927 u16 zone;
1928
1929 rem = NLA_ALIGN(nla_len(nla_zone_limit));
1930 zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
1931
1932 while (rem >= sizeof(*zone_limit)) {
1933 if (unlikely(zone_limit->zone_id ==
1934 OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
1935 ovs_lock();
1936 info->default_limit = zone_limit->limit;
1937 ovs_unlock();
1938 } else if (unlikely(!check_zone_id(
1939 zone_limit->zone_id, &zone))) {
1940 OVS_NLERR(true, "zone id is out of range");
1941 } else {
1942 struct ovs_ct_limit *ct_limit;
1943
1944 ct_limit = kmalloc(sizeof(*ct_limit), GFP_KERNEL);
1945 if (!ct_limit)
1946 return -ENOMEM;
1947
1948 ct_limit->zone = zone;
1949 ct_limit->limit = zone_limit->limit;
1950
1951 ovs_lock();
1952 ct_limit_set(info, ct_limit);
1953 ovs_unlock();
1954 }
1955 rem -= NLA_ALIGN(sizeof(*zone_limit));
1956 zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
1957 NLA_ALIGN(sizeof(*zone_limit)));
1958 }
1959
1960 if (rem)
1961 OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);
1962
1963 return 0;
1964}
1965
1966static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
1967 struct ovs_ct_limit_info *info)
1968{
1969 struct ovs_zone_limit *zone_limit;
1970 int rem;
1971 u16 zone;
1972
1973 rem = NLA_ALIGN(nla_len(nla_zone_limit));
1974 zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
1975
1976 while (rem >= sizeof(*zone_limit)) {
1977 if (unlikely(zone_limit->zone_id ==
1978 OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
1979 ovs_lock();
1980 info->default_limit = OVS_CT_LIMIT_DEFAULT;
1981 ovs_unlock();
1982 } else if (unlikely(!check_zone_id(
1983 zone_limit->zone_id, &zone))) {
1984 OVS_NLERR(true, "zone id is out of range");
1985 } else {
1986 ovs_lock();
1987 ct_limit_del(info, zone);
1988 ovs_unlock();
1989 }
1990 rem -= NLA_ALIGN(sizeof(*zone_limit));
1991 zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
1992 NLA_ALIGN(sizeof(*zone_limit)));
1993 }
1994
1995 if (rem)
1996 OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);
1997
1998 return 0;
1999}
2000
2001static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
2002 struct sk_buff *reply)
2003{
2004 struct ovs_zone_limit zone_limit;
2005 int err;
2006
2007 zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
2008 zone_limit.limit = info->default_limit;
2009 err = nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
2010 if (err)
2011 return err;
2012
2013 return 0;
2014}
2015
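/* Fill one struct ovs_zone_limit reply entry for @zone_id: the configured
 * @limit plus the current connection count reported by nf_conncount for
 * that zone.
 */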
2016static int __ovs_ct_limit_get_zone_limit(struct net *net,
2017 struct nf_conncount_data *data,
2018 u16 zone_id, u32 limit,
2019 struct sk_buff *reply)
2020{
2021 struct nf_conntrack_zone ct_zone;
2022 struct ovs_zone_limit zone_limit;
2023 u32 conncount_key = zone_id;
2024
2025 zone_limit.zone_id = zone_id;
2026 zone_limit.limit = limit;
2027 nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);
2028
2029 zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
2030 &ct_zone);
2031 return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
2032}
2033
2034static int ovs_ct_limit_get_zone_limit(struct net *net,
2035 struct nlattr *nla_zone_limit,
2036 struct ovs_ct_limit_info *info,
2037 struct sk_buff *reply)
2038{
2039 struct ovs_zone_limit *zone_limit;
2040 int rem, err;
2041 u32 limit;
2042 u16 zone;
2043
2044 rem = NLA_ALIGN(nla_len(nla_zone_limit));
2045 zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);
2046
2047 while (rem >= sizeof(*zone_limit)) {
2048 if (unlikely(zone_limit->zone_id ==
2049 OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
2050 err = ovs_ct_limit_get_default_limit(info, reply);
2051 if (err)
2052 return err;
2053 } else if (unlikely(!check_zone_id(zone_limit->zone_id,
2054 &zone))) {
2055 OVS_NLERR(true, "zone id is out of range");
2056 } else {
2057 rcu_read_lock();
2058 limit = ct_limit_get(info, zone);
2059 rcu_read_unlock();
2060
2061 err = __ovs_ct_limit_get_zone_limit(
2062 net, info->data, zone, limit, reply);
2063 if (err)
2064 return err;
2065 }
2066 rem -= NLA_ALIGN(sizeof(*zone_limit));
2067 zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
2068 NLA_ALIGN(sizeof(*zone_limit)));
2069 }
2070
2071 if (rem)
2072 OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);
2073
2074 return 0;
2075}
2076
2077static int ovs_ct_limit_get_all_zone_limit(struct net *net,
2078 struct ovs_ct_limit_info *info,
2079 struct sk_buff *reply)
2080{
2081 struct ovs_ct_limit *ct_limit;
2082 struct hlist_head *head;
2083 int i, err = 0;
2084
2085 err = ovs_ct_limit_get_default_limit(info, reply);
2086 if (err)
2087 return err;
2088
2089 rcu_read_lock();
2090 for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
2091 head = &info->limits[i];
2092 hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
2093 err = __ovs_ct_limit_get_zone_limit(net, info->data,
2094 ct_limit->zone, ct_limit->limit, reply);
2095 if (err)
2096 goto exit_err;
2097 }
2098 }
2099
2100exit_err:
2101 rcu_read_unlock();
2102 return err;
2103}
2104
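/* Handle OVS_CT_LIMIT_CMD_SET: install the zone limits carried in
 * OVS_CT_LIMIT_ATTR_ZONE_LIMIT and enable the ovs_ct_limit_enabled static
 * key (limit checking is skipped entirely until at least one limit has
 * been set).
 */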
2105static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
2106{
2107 struct nlattr **a = info->attrs;
2108 struct sk_buff *reply;
2109 struct ovs_header *ovs_reply_header;
2110 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
2111 struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
2112 int err;
2113
2114 reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
2115 &ovs_reply_header);
2116 if (IS_ERR(reply))
2117 return PTR_ERR(reply);
2118
2119 if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
2120 err = -EINVAL;
2121 goto exit_err;
2122 }
2123
2124 err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
2125 ct_limit_info);
2126 if (err)
2127 goto exit_err;
2128
2129 static_branch_enable(&ovs_ct_limit_enabled);
2130
2131 genlmsg_end(reply, ovs_reply_header);
2132 return genlmsg_reply(reply, info);
2133
2134exit_err:
2135 nlmsg_free(reply);
2136 return err;
2137}
2138
2139static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
2140{
2141 struct nlattr **a = info->attrs;
2142 struct sk_buff *reply;
2143 struct ovs_header *ovs_reply_header;
2144 struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
2145 struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
2146 int err;
2147
2148 reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
2149 &ovs_reply_header);
2150 if (IS_ERR(reply))
2151 return PTR_ERR(reply);
2152
2153 if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
2154 err = -EINVAL;
2155 goto exit_err;
2156 }
2157
2158 err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
2159 ct_limit_info);
2160 if (err)
2161 goto exit_err;
2162
2163 genlmsg_end(reply, ovs_reply_header);
2164 return genlmsg_reply(reply, info);
2165
2166exit_err:
2167 nlmsg_free(reply);
2168 return err;
2169}
2170
2171static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
2172{
2173 struct nlattr **a = info->attrs;
2174 struct nlattr *nla_reply;
2175 struct sk_buff *reply;
2176 struct ovs_header *ovs_reply_header;
2177 struct net *net = sock_net(skb->sk);
2178 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2179 struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
2180 int err;
2181
2182 reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
2183 &ovs_reply_header);
2184 if (IS_ERR(reply))
2185 return PTR_ERR(reply);
2186
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002187 nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
Colin Ian Kingca965342019-05-01 14:41:58 +01002188 if (!nla_reply) {
2189 err = -EMSGSIZE;
2190 goto exit_err;
2191 }
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002192
2193 if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
2194 err = ovs_ct_limit_get_zone_limit(
2195 net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
2196 reply);
2197 if (err)
2198 goto exit_err;
2199 } else {
2200 err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
2201 reply);
2202 if (err)
2203 goto exit_err;
2204 }
2205
2206 nla_nest_end(reply, nla_reply);
2207 genlmsg_end(reply, ovs_reply_header);
2208 return genlmsg_reply(reply, info);
2209
2210exit_err:
2211 nlmsg_free(reply);
2212 return err;
2213}
2214
2215static struct genl_ops ct_limit_genl_ops[] = {
2216 { .cmd = OVS_CT_LIMIT_CMD_SET,
Johannes Bergef6243a2019-04-26 14:07:31 +02002217 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002218 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
2219 * privilege. */
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002220 .doit = ovs_ct_limit_cmd_set,
2221 },
2222 { .cmd = OVS_CT_LIMIT_CMD_DEL,
Johannes Bergef6243a2019-04-26 14:07:31 +02002223 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002224 .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
2225 * privilege. */
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002226 .doit = ovs_ct_limit_cmd_del,
2227 },
2228 { .cmd = OVS_CT_LIMIT_CMD_GET,
Johannes Bergef6243a2019-04-26 14:07:31 +02002229 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002230 .flags = 0, /* OK for unprivileged users. */
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002231 .doit = ovs_ct_limit_cmd_get,
2232 },
2233};
2234
2235static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
2236 .name = OVS_CT_LIMIT_MCGROUP,
2237};
2238
2239struct genl_family dp_ct_limit_genl_family __ro_after_init = {
2240 .hdrsize = sizeof(struct ovs_header),
2241 .name = OVS_CT_LIMIT_FAMILY,
2242 .version = OVS_CT_LIMIT_VERSION,
2243 .maxattr = OVS_CT_LIMIT_ATTR_MAX,
Johannes Berg3b0f31f2019-03-21 22:51:02 +01002244 .policy = ct_limit_policy,
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002245 .netnsok = true,
2246 .parallel_ops = true,
2247 .ops = ct_limit_genl_ops,
2248 .n_ops = ARRAY_SIZE(ct_limit_genl_ops),
2249 .mcgrps = &ovs_ct_limit_multicast_group,
2250 .n_mcgrps = 1,
2251 .module = THIS_MODULE,
2252};
2253#endif
2254
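/* Per-netns init/exit: request the full-width connlabel area (disabling
 * ct_labels support if that fails) and, when conncount is built in, set up
 * or tear down the zone limit state.
 */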
2255int ovs_ct_init(struct net *net)
Joe Stringerc2ac6672015-08-26 11:31:52 -07002256{
Joe Stringer33db4122015-10-01 15:00:37 -07002257 unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
Joe Stringerc2ac6672015-08-26 11:31:52 -07002258 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2259
Florian Westphaladff6c62016-04-12 18:14:25 +02002260 if (nf_connlabels_get(net, n_bits - 1)) {
Joe Stringerc2ac6672015-08-26 11:31:52 -07002261 ovs_net->xt_label = false;
2262 OVS_NLERR(true, "Failed to set connlabel length");
2263 } else {
2264 ovs_net->xt_label = true;
2265 }
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002266
2267#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2268 return ovs_ct_limit_init(net, ovs_net);
2269#else
2270 return 0;
2271#endif
Joe Stringerc2ac6672015-08-26 11:31:52 -07002272}
2273
2274void ovs_ct_exit(struct net *net)
2275{
2276 struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2277
Yi-Hung Wei11efd5c2018-05-24 17:56:43 -07002278#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
2279 ovs_ct_limit_exit(net, ovs_net);
2280#endif
2281
Joe Stringerc2ac6672015-08-26 11:31:52 -07002282 if (ovs_net->xt_label)
2283 nf_connlabels_put(net);
2284}