// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>
#include <net/nsh.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"

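/* Convert a flow's last-used time from jiffies to milliseconds on the
 * monotonic clock: the current time minus how long the flow has been idle.
 */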
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec64 cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts64(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}

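/* The 16-bit halfword spanning the TCP data-offset and flags fields,
 * masked down to the flag bits (plus the adjacent reserved/NS bits).
 */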
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

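/**
 * ovs_flow_stats_update - update per-CPU flow statistics for a packet.
 * @flow: flow being updated
 * @tcp_flags: TCP flags to OR into the accumulated flags
 * @skb: packet being accounted
 *
 * Uses this CPU's stats entry when one exists; otherwise falls back to
 * the pre-allocated CPU-0 entry, allocating a CPU-local entry once more
 * than one CPU contends for the pre-allocated one.
 */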
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct sw_flow_stats *stats;
	unsigned int cpu = smp_processor_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[cpu]);

	/* Check if we already have CPU-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
			flow->stats_last_writer = cpu;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current CPU is the only writer on the
		 * pre-allocated stats, keep using them.
		 */
		if (unlikely(flow->stats_last_writer != cpu)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If CPU-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != -1) &&
			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
				/* Try to allocate CPU-specific stats. */
				struct sw_flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      numa_node_id());
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[cpu],
							   new_stats);
					cpumask_set_cpu(cpu, &flow->cpu_used_mask);
					goto unlock;
				}
			}
			flow->stats_last_writer = cpu;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}

/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int cpu;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}

/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu;

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}

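/* Ensure that the first @len bytes of the packet are present and linear:
 * returns -EINVAL if the packet is shorter than @len, -ENOMEM if it
 * cannot be pulled.
 */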
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}

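/* Fill in the IPv6 portion of the flow key and walk the extension
 * headers to find the transport protocol and fragment information.
 * Returns the network header length (base header plus extensions),
 * 0 for non-first fragments, or a negative errno on error.
 */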
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned short frag_off;
	unsigned int payload_ofs = 0;
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int err, nexthdr, flags = 0;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (flags & IP6_FH_F_FRAG) {
		if (frag_off) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			key->ip.proto = nexthdr;
			return 0;
		}
		key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Error handling for ipv6_find_hdr() is delayed until here
	 * because it always sets flags and frag_off to valid values,
	 * which may be used to set key->ip.frag above.
	 */
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

/**
 * parse_vlan_tag - parse a vlan tag from the vlan header.
 * @skb: skb containing the frame to parse
 * @key_vh: pointer to the parsed vlan tag
 * @untag_vlan: whether the vlan header should be removed from the frame
 *
 * Returns a negative errno on memory error,
 * 0 if it encounters a non-vlan or incomplete packet,
 * 1 after successfully parsing the vlan tag.
 */
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
			  bool untag_vlan)
{
	struct vlan_head *vh = (struct vlan_head *)skb->data;

	if (likely(!eth_type_vlan(vh->tpid)))
		return 0;

	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
				    sizeof(__be16))))
		return -ENOMEM;

	vh = (struct vlan_head *)skb->data;
	key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
	key_vh->tpid = vh->tpid;

	if (unlikely(untag_vlan)) {
		int offset = skb->data - skb_mac_header(skb);
		u16 tci;
		int err;

		__skb_push(skb, offset);
		err = __skb_vlan_pop(skb, &tci);
		__skb_pull(skb, offset);
		if (err)
			return err;
		__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
	} else {
		__skb_pull(skb, sizeof(struct vlan_head));
	}
	return 1;
}

static void clear_vlan(struct sw_flow_key *key)
{
	key->eth.vlan.tci = 0;
	key->eth.vlan.tpid = 0;
	key->eth.cvlan.tci = 0;
	key->eth.cvlan.tpid = 0;
}

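/* Extract the outer (and, if present, inner) vlan tag into the flow key.
 * The outer tag may come from hardware acceleration metadata rather than
 * from the packet itself.
 */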
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	if (skb_vlan_tag_present(skb)) {
		key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
		key->eth.vlan.tpid = skb->vlan_proto;
	} else {
		/* Parse outer vlan tag in the non-accelerated case. */
		res = parse_vlan_tag(skb, &key->eth.vlan, true);
		if (res <= 0)
			return res;
	}

	/* Parse inner vlan tag. */
	res = parse_vlan_tag(skb, &key->eth.cvlan, false);
	if (res <= 0)
		return res;

	return 0;
}

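/* Pull the 2-byte ethertype; if it is a real ethertype, return it.
 * Otherwise treat the frame as 802.3 and return the ethertype
 * encapsulated in an LLC/SNAP header, ETH_P_802_2 for non-SNAP frames,
 * or 0 if the header cannot be pulled.
 */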
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}

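/* Extract the ICMPv6 type/code into the transport port fields of the key
 * and, for neighbour discovery messages, the target address and
 * link-layer address options.  Invalid ND options clear the ND part of
 * the key rather than failing the extraction.
 */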
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}

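/* Extract the NSH base header and, for MD type 1, the fixed-size context
 * into the flow key.  Only NSH version 0 is accepted.
 */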
static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct nshhdr *nh;
	unsigned int nh_ofs = skb_network_offset(skb);
	u8 version, length;
	int err;

	err = check_header(skb, nh_ofs + NSH_BASE_HDR_LEN);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	version = nsh_get_ver(nh);
	length = nsh_hdr_len(nh);

	if (version != 0)
		return -EINVAL;

	err = check_header(skb, nh_ofs + length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	key->nsh.base.flags = nsh_get_flags(nh);
	key->nsh.base.ttl = nsh_get_ttl(nh);
	key->nsh.base.mdtype = nh->mdtype;
	key->nsh.base.np = nh->np;
	key->nsh.base.path_hdr = nh->path_hdr;
	switch (key->nsh.base.mdtype) {
	case NSH_M_TYPE1:
		if (length != NSH_M_TYPE1_LEN)
			return -EINVAL;
		memcpy(key->nsh.context, nh->md1.context,
		       sizeof(nh->md1));
		break;
	case NSH_M_TYPE2:
		memset(key->nsh.context, 0,
		       sizeof(nh->md1));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * key_extract_l3l4 - extracts L3/L4 header information.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       L3 header
 * @key: output flow key
 *
 * Returns 0 if successful, otherwise a negative errno value.
 */
static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		skb_set_inner_network_header(skb, skb->mac_len);
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);

			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_inner_network_header(skb, skb->mac_len + stack_len);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				/* fall-through */
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	} else if (key->eth.type == htons(ETH_P_NSH)) {
		error = parse_nsh(skb, key);
		if (error)
			return error;
	}
	return 0;
}

/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header fields as follows:
 *
 *    - skb->mac_header: the L2 header.
 *
 *    - skb->network_header: just past the L2 header, or just past the
 *      VLAN header, to the first byte of the L2 payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 *
 *    - skb->protocol: the type of the data starting at skb->network_header.
 *      Equals to key->eth.type.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer. */
	clear_vlan(key);
	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
		if (unlikely(eth_type_vlan(skb->protocol)))
			return -EINVAL;

		skb_reset_network_header(skb);
		key->eth.type = skb->protocol;
	} else {
		eth = eth_hdr(skb);
		ether_addr_copy(key->eth.src, eth->h_source);
		ether_addr_copy(key->eth.dst, eth->h_dest);

		__skb_pull(skb, 2 * ETH_ALEN);
		/* We are going to push all headers that we pull, so no need to
		 * update skb->csum here.
		 */

		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

		key->eth.type = parse_ethertype(skb);
		if (unlikely(key->eth.type == htons(0)))
			return -ENOMEM;

		/* Multiple tagged packets need to retain TPID to satisfy
		 * skb_vlan_pop(), which will later shift the ethertype into
		 * skb->protocol.
		 */
		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
			skb->protocol = key->eth.cvlan.tpid;
		else
			skb->protocol = key->eth.type;

		skb_reset_network_header(skb);
		__skb_push(skb, skb->data - skb_mac_header(skb));
	}

	skb_reset_mac_len(skb);

	/* Fill out L3/L4 key info, if any */
	return key_extract_l3l4(skb, key);
}

/* Conntrack fragment handling presents packets with only L3 headers, so
 * provide a helper that re-extracts just the L3/L4 part of the key.
 */
int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract_l3l4(skb, key);
}

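/* Re-extract the whole flow key after the packet has been modified, and
 * clear the key's invalid flag on success.
 */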
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	res = key_extract(skb, key);
	if (!res)
		key->mac_proto &= ~SW_FLOW_KEY_INVALID;

	return res;
}

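/* Decide from the input device type whether the packet starts with an
 * Ethernet header (MAC_PROTO_ETHERNET) or with an L3 header
 * (MAC_PROTO_NONE, for ARPHRD_NONE devices carrying non-TEB packets).
 */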
static int key_extract_mac_proto(struct sk_buff *skb)
{
	switch (skb->dev->type) {
	case ARPHRD_ETHER:
		return MAC_PROTO_ETHERNET;
	case ARPHRD_NONE:
		if (skb->protocol == htons(ETH_P_TEB))
			return MAC_PROTO_ETHERNET;
		return MAC_PROTO_NONE;
	}
	WARN_ON_ONCE(1);
	return -EINVAL;
}

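/* Build the complete flow key for a received packet: tunnel metadata (if
 * any), packet metadata (priority, input port, mark, recirc id), the
 * extracted header fields, and the conntrack state.
 */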
int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *tc_ext;
#endif
	int res, err;

	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	key->ovs_flow_hash = 0;
	res = key_extract_mac_proto(skb);
	if (res < 0)
		return res;
	key->mac_proto = res;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	if (static_branch_unlikely(&tc_recirc_sharing_support)) {
		tc_ext = skb_ext_find(skb, TC_SKB_EXT);
		key->recirc_id = tc_ext ? tc_ext->chain : 0;
	} else {
		key->recirc_id = 0;
	}
#else
	key->recirc_id = 0;
#endif

	err = key_extract(skb, key);
	if (!err)
		ovs_ct_fill_key(skb, key);	/* Must be after key_extract(). */
	return err;
}

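/* Like ovs_flow_key_extract(), but for packets injected from userspace:
 * the metadata comes from netlink attributes instead of the datapath.
 */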
int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	u64 attrs = 0;
	int err;

	err = parse_flow_nlattrs(attr, a, &attrs, log);
	if (err)
		return -EINVAL;

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
	if (err)
		return err;

	/* key_extract assumes that skb->protocol is set up for
	 * layer 3 packets, which is the case for other callers,
	 * in particular packets received from the network stack.
	 * Here the correct value can be set from the metadata
	 * extracted above.
	 * For L2 packets the key's eth type would be zero; skb->protocol
	 * is then set to the correct value later during key_extract.
	 */

	skb->protocol = key->eth.type;
	err = key_extract(skb, key);
	if (err)
		return err;

	/* Check that we have conntrack original direction tuple metadata only
	 * for packets for which it makes sense.  Otherwise the key may be
	 * corrupted due to overlapping key fields.
	 */
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) &&
	    key->eth.type != htons(ETH_P_IP))
		return -EINVAL;
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) &&
	    (key->eth.type != htons(ETH_P_IPV6) ||
	     sw_flow_key_is_nd(key)))
		return -EINVAL;

	return 0;
}