// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
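/* Per-CPU scratch state saved by prepare_frag() and restored by
 * ovs_vport_output() so that each fragment can be sent with the
 * original L2 header, VLAN tag and OVS metadata.
 */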
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return the queued entry if the fifo is not full, otherwise NULL. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

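/* A flow key is marked invalid by setting the SW_FLOW_KEY_INVALID bit in
 * 'mac_proto'; actions that rewrite headers use this so the key is
 * re-extracted before it is used for another lookup.
 */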
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

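/* Push an MPLS label stack entry onto 'skb'.  A zero 'mac_len' means the
 * label is pushed onto an L3 (Ethernet-less) packet, so the key's MAC
 * protocol is cleared accordingly.
 */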
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

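/* Update the TCP or UDP checksum after an IPv4 address rewrite.  Later
 * fragments carry no L4 header, so they are skipped.
 */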
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

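/* Apply masked changes to the NSH header described by attribute 'a' and
 * mirror the new values into the flow key.
 */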
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
				OVS_MASKED(nh->md1.context[i], key.context[i],
					   mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

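/* Output callback used while fragmenting: called once per fragment to
 * restore the state captured by prepare_frag() and hand the fragment to
 * the vport.
 */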
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

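/* Minimal MTU callback and dst_ops for the temporary dst that
 * ovs_fragment() attaches to the skb, so the IP fragmentation code can
 * query the egress device MTU.
 */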
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

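/* Fragment an IPv4 or IPv6 packet that exceeds the MRU and transmit the
 * fragments through 'vport' via ovs_vport_output().  Consumes 'skb'.
 */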
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

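/* Transmit 'skb' on datapath port 'out_port', applying any pending
 * truncation and fragmenting packets larger than the MRU.  Consumes 'skb'.
 */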
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

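/* Build an OVS_PACKET_CMD_ACTION upcall from the OVS_ACTION_ATTR_USERSPACE
 * attributes and deliver the packet to userspace.
 */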
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

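/* Run the nested actions embedded in the dec_ttl attribute; used when
 * execute_dec_ttl() finds that the TTL or hop limit would reach zero.
 */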
static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr, bool last)
{
	/* The first action is always 'OVS_DEC_TTL_ATTR_ARG'. */
	struct nlattr *dec_ttl_arg = nla_data(attr);
	int rem = nla_len(attr);

	if (nla_len(dec_ttl_arg)) {
		struct nlattr *actions = nla_next(dec_ttl_arg, &rem);

		if (actions)
			return clone_execute(dp, skb, key, 0, actions, rem,
					     last, false);
	}
	consume_skb(skb);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_ARG'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

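/* Compute an L4 hash of the packet, mix in the requested basis and store
 * the result in the flow key's 'ovs_flow_hash'.
 */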
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

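/* Recirculate the packet through the flow table under 'recirc_id',
 * refreshing the flow key first if an earlier action invalidated it.
 */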
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

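/* Decrement the IPv4 TTL or IPv6 hop limit, updating the IPv4 checksum
 * and the flow key.  Returns -EHOSTUNREACH when the TTL would drop to zero.
 */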
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'. In case the output action is the
			 * last action, cloning can be avoided.
			 */
1262 if (nla_is_last(a, rem)) {
1263 do_output(dp, skb, port, key);
1264 /* 'skb' has been used for output.
1265 */
1266 return 0;
1267 }
1268
1269 clone = skb_clone(skb, GFP_ATOMIC);
1270 if (clone)
1271 do_output(dp, clone, port, key);
1272 OVS_CB(skb)->cutlen = 0;
Jesse Grossccb13522011-10-25 19:26:31 -07001273 break;
andy zhou5b8784a2017-01-27 13:45:28 -08001274 }
Jesse Grossccb13522011-10-25 19:26:31 -07001275
William Tuf2a4d082016-06-10 11:49:33 -07001276 case OVS_ACTION_ATTR_TRUNC: {
1277 struct ovs_action_trunc *trunc = nla_data(a);
1278
1279 if (skb->len > trunc->max_len)
1280 OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
1281 break;
1282 }
1283
Jesse Grossccb13522011-10-25 19:26:31 -07001284 case OVS_ACTION_ATTR_USERSPACE:
William Tuf2a4d082016-06-10 11:49:33 -07001285 output_userspace(dp, skb, key, a, attr,
1286 len, OVS_CB(skb)->cutlen);
1287 OVS_CB(skb)->cutlen = 0;
Jesse Grossccb13522011-10-25 19:26:31 -07001288 break;
1289
Andy Zhou971427f32014-09-15 19:37:25 -07001290 case OVS_ACTION_ATTR_HASH:
1291 execute_hash(skb, key, a);
1292 break;
1293
Martin Varghesef66b53f2019-12-21 08:50:46 +05301294 case OVS_ACTION_ATTR_PUSH_MPLS: {
1295 struct ovs_action_push_mpls *mpls = nla_data(a);
Simon Horman25cd9ba2014-10-06 05:05:13 -07001296
Martin Varghesef66b53f2019-12-21 08:50:46 +05301297 err = push_mpls(skb, key, mpls->mpls_lse,
1298 mpls->mpls_ethertype, skb->mac_len);
1299 break;
1300 }
1301 case OVS_ACTION_ATTR_ADD_MPLS: {
1302 struct ovs_action_add_mpls *mpls = nla_data(a);
1303 __u16 mac_len = 0;
1304
1305 if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
1306 mac_len = skb->mac_len;
1307
1308 err = push_mpls(skb, key, mpls->mpls_lse,
1309 mpls->mpls_ethertype, mac_len);
1310 break;
1311 }
Simon Horman25cd9ba2014-10-06 05:05:13 -07001312 case OVS_ACTION_ATTR_POP_MPLS:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001313 err = pop_mpls(skb, key, nla_get_be16(a));
Simon Horman25cd9ba2014-10-06 05:05:13 -07001314 break;
1315
Jesse Grossccb13522011-10-25 19:26:31 -07001316 case OVS_ACTION_ATTR_PUSH_VLAN:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001317 err = push_vlan(skb, key, nla_data(a));
Jesse Grossccb13522011-10-25 19:26:31 -07001318 break;
1319
1320 case OVS_ACTION_ATTR_POP_VLAN:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001321 err = pop_vlan(skb, key);
Jesse Grossccb13522011-10-25 19:26:31 -07001322 break;
1323
andy zhoubef7f752017-03-20 16:32:30 -07001324 case OVS_ACTION_ATTR_RECIRC: {
1325 bool last = nla_is_last(a, rem);
1326
1327 err = execute_recirc(dp, skb, key, a, last);
1328 if (last) {
Andy Zhou971427f32014-09-15 19:37:25 -07001329 /* If this is the last action, the skb has
1330 * been consumed or freed.
1331 * Return immediately.
1332 */
1333 return err;
1334 }
1335 break;
andy zhoubef7f752017-03-20 16:32:30 -07001336 }
Andy Zhou971427f32014-09-15 19:37:25 -07001337
		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

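		/* Sample runs a nested action list on (usually) a clone of
		 * the packet.  When it is the last action, ownership of
		 * 'skb' passes to sample() and its result is returned
		 * directly.
		 */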
		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

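		/* Conntrack needs an up-to-date flow key; earlier actions may
		 * have modified the packet without updating 'key', so
		 * re-extract it before handing the packet to conntrack.
		 */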
		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

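		/* The NSH header is first assembled from the netlink
		 * attribute into an on-stack buffer and only pushed onto the
		 * packet if that conversion succeeds.
		 */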
		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

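		/* A non-zero return from ovs_meter_execute() means the meter
		 * decided to drop the packet; consume it here and stop
		 * processing the remaining actions.
		 */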
		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
			break;

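		/* Clone runs a nested action list on a copy of the packet;
		 * as with sample, the original 'skb' is handed over instead
		 * of copied when this is the last action.
		 */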
		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

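		/* check_pkt_len compares the packet length against the
		 * configured threshold and executes one of two nested action
		 * lists accordingly; ownership of 'skb' again passes to the
		 * helper when this is the last action.
		 */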
		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

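		/* -EHOSTUNREACH from execute_dec_ttl() means the TTL or IPv6
		 * hop limit has expired; run the nested exception actions,
		 * which consume 'skb', and return.
		 */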
		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH) {
				err = dec_ttl_exception_handler(dp, skb, key,
								a, true);
				return err;
			}
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on a clone of the packet.  Execution does not
 * affect the original 'skb' or the original 'key'.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action.
		 */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone the key from the next recursion level of
	 * 'flow_keys'.  If the clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
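/* Action execution may recurse (recirc, sample, clone, check_pkt_len).
 * The depth is bounded by OVS_RECURSION_LIMIT; once the per-CPU
 * 'flow_keys' slots are exhausted, clone_execute() defers the work to the
 * per-CPU action FIFO, which process_deferred_actions() drains only at the
 * outermost level (level == 1).
 */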
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}