/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

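/* Actions can be executed recursively (e.g. for recirculation and sample
 * actions).  The first few recursion levels keep their flow keys in the
 * per-CPU 'recirc_keys' scratch area; deeper levels queue the work in a
 * small per-CPU FIFO that is drained once the outermost invocation
 * finishes.
 */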
#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct recirc_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct recirc_keys __percpu *recirc_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Returns the queued entry, or NULL if the FIFO is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

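/* Rewrite the Ethernet type field.  With CHECKSUM_COMPLETE the device has
 * already summed the whole frame, so the checksum delta of the rewritten
 * 16-bit field must be folded into skb->csum.
 */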
static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	hdr->h_proto = ethertype;
}

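/* Push an MPLS label stack entry: make room with skb_cow_head(), shift the
 * L2 header to open a gap of MPLS_HLEN, write the new top LSE and switch
 * the frame's ethertype to the requested MPLS ethertype.
 */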
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and
	 * MPLS GSO.
	 */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

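/* Pop the outermost MPLS label stack entry and restore the given ethertype;
 * the inverse of push_mpls().
 */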
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	/* mpls_hdr() is used to locate the ethertype field correctly in the
	 * presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(stack->label_stack_entry), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	stack->label_stack_entry = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

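/* Strip the outermost VLAN tag.  If another tag remains afterwards, the
 * cached flow key no longer matches the packet and must be invalidated.
 */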
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

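/* TCP and UDP checksums cover an IPv4 pseudo-header, so rewriting an IP
 * address also requires an L4 checksum fixup.  Non-first fragments carry
 * no L4 header and are skipped.
 */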
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

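/* SCTP uses a CRC32c checksum, which is not amenable to incremental
 * updates.  Recompute it before and after the rewrite and XOR the results
 * into the old value, so a packet that arrived with a bad checksum still
 * leaves with one.
 */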
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}

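/* Output callback handed to the IP fragmentation code: restore the L2
 * header and OVS metadata stashed by prepare_frag() and transmit one
 * fragment through the saved vport.
 */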
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

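/* Minimal dst_ops for the fake dst_entry set up by ovs_fragment(); the
 * fragmentation code only consults it for the egress device's MTU.
 */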
static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

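/* Fragment an over-MRU packet by borrowing the stack's IPv4/IPv6
 * fragmentation paths, using a NOCOUNT on-stack dst so the helpers see the
 * vport device and its MTU.  Packets that cannot be fragmented are dropped.
 */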
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}

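/* Transmit 'skb' on 'out_port', honouring any pending truncation (cutlen)
 * and fragmenting packets that exceed the flow's maximum received unit.
 */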
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ETH_HLEN)
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ETH_HLEN);
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key->eth.type);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

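/* Build a dp_upcall_info from the OVS_ACTION_ATTR_USERSPACE attributes
 * (netlink PID, userdata, optional egress tunnel info and actions) and
 * hand the packet to userspace via ovs_dp_upcall().
 */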
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

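/* Sample action: forward the packet to the nested action list with the
 * configured probability.  The common single-userspace-action case (with
 * an optional leading truncate) is executed inline; anything else is
 * cloned and deferred.
 */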
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;
	u32 cutlen = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action, or having a truncate action followed by a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (unlikely(nla_type(a) == OVS_ACTION_ATTR_TRUNC)) {
		struct ovs_action_trunc *trunc = nla_data(a);

		if (skb->len > trunc->max_len)
			cutlen = skb->len - trunc->max_len;

		a = nla_next(a, &rem);
	}

	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions,
					actions_len, cutlen);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a),
			       get_mask(a, __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

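/* Re-inject the packet into the datapath with a new recirculation ID.
 * Shallow recursion levels reuse the per-CPU recirc_keys scratch keys;
 * beyond OVS_DEFERRED_ACTION_THRESHOLD the packet is queued on the
 * deferred-action FIFO instead.
 */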
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int level;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	level = this_cpu_read(exec_actions_level);
	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
		struct sw_flow_key *recirc_key = &rks->key[level - 1];

		*recirc_key = *key;
		recirc_key->recirc_id = nla_get_u32(a);
		ovs_dp_process_packet(skb, recirc_key);

		return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			OVS_CB(skb)->cutlen = 0;
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

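/* Drain the per-CPU deferred-action FIFO.  Called only at the outermost
 * recursion level, after the top-level action list has finished.
 */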
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	recirc_keys = alloc_percpu(struct recirc_keys);
	if (!recirc_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(recirc_keys);
}