// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
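
/* Recirculation and nested action lists (e.g. sample()) re-enter
 * do_execute_actions().  The per-CPU structures below bound that work:
 * recursion is capped at OVS_RECURSION_LIMIT, only the first
 * OVS_DEFERRED_ACTION_THRESHOLD levels get a pre-allocated flow-key
 * clone, and actions that cannot run inline are queued as a struct
 * deferred_action (see add_deferred_actions()) to be drained after the
 * current action list finishes.
 */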
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key space.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		*key = *key_;
	}

	return key;
}
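
/* Note: the caller is expected to have bumped exec_actions_level before
 * any action runs (ovs_execute_actions() does this in the kernel), so
 * 'level' above is at least 1 and key[level - 1] is a valid slot whenever
 * the threshold check passes.
 */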

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
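
/* action_fifo_put() stops one short of DEFERRED_ACTION_FIFO_SIZE, so at
 * most nine of the ten slots are ever used.  The fifo does not wrap:
 * head and tail only grow until the queue is drained and
 * action_fifo_init() resets both to zero.
 */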

/* Return the next free deferred-action slot, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}
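
/* The '!!mac_len' argument tells skb_mpls_push() whether an Ethernet
 * header precedes the new label: with mac_len == 0 the label is pushed
 * onto a bare L3 packet, and the flow key's mac_proto is switched to
 * MAC_PROTO_NONE above to match.
 */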

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}
212
Pravin B Shelarfff06c32014-11-06 06:55:14 -0800213static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
Jesse Grossccb13522011-10-25 19:26:31 -0700214{
Jesse Grossccb13522011-10-25 19:26:31 -0700215 int err;
216
Jiri Pirko93515d52014-11-19 14:05:02 +0100217 err = skb_vlan_pop(skb);
Eric Garver018c1dd2016-09-07 12:56:59 -0400218 if (skb_vlan_tag_present(skb)) {
Jiri Pirko93515d52014-11-19 14:05:02 +0100219 invalidate_flow_key(key);
Eric Garver018c1dd2016-09-07 12:56:59 -0400220 } else {
221 key->eth.vlan.tci = 0;
222 key->eth.vlan.tpid = 0;
223 }
Jiri Pirko93515d52014-11-19 14:05:02 +0100224 return err;
Jesse Grossccb13522011-10-25 19:26:31 -0700225}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
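
/* Non-first IPv4 fragments (non-zero offset in frag_off) carry no L4
 * header, which is why the helper above returns early for them: only the
 * IP header checksum needs fixing in that case, and set_ip_addr() below
 * takes care of it.
 */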

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}
410
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800411static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
412 const __be32 mask[4], __be32 masked[4])
413{
Joe Stringerbe26b9a2015-08-26 11:31:45 -0700414 masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
415 masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
416 masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
417 masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
Jarno Rajahalme83d2b9b2015-02-05 13:40:49 -0800418}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so
	 * it makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so
	 * it makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
			*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
			    OVS_MASKED(nh->md1.context[i], key.context[i],
				       mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}
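
/* A UDP checksum of zero means "no checksum computed" (RFC 768), so when
 * a recomputed checksum happens to come out as zero it is stored as
 * CSUM_MANGLED_0 (0xffff), an equivalent value in ones'-complement
 * arithmetic that keeps the checksum present and valid.
 */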

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
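
/* The XOR above applies the packet's pre-existing checksum error to the
 * freshly computed value: old_csum ^ old_correct_csum is the (normally
 * zero) delta between the checksum the packet carried and the one it
 * should have carried, so already-corrupted packets stay corrupted after
 * the port rewrite.
 */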

static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}
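
/* ovs_fragment() points the skb at a fake, on-stack dst (DST_NOCOUNT,
 * installed with skb_dst_set_noref()) whose only purpose is to expose the
 * egress vport's net_device and MTU to the stack's fragmentation code;
 * ovs_vport_output() then restores the saved OVS state on each fragment
 * before it is sent.
 */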

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr, bool last)
{
	/* The first action is always 'OVS_DEC_TTL_ATTR_ARG'. */
	struct nlattr *dec_ttl_arg = nla_data(attr);
	int rem = nla_len(attr);

	if (nla_len(dec_ttl_arg)) {
		struct nlattr *actions = nla_next(dec_ttl_arg, &rem);

		if (actions)
			return clone_execute(dp, skb, key, 0, actions, rem,
					     last, false);
	}
	consume_skb(skb);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}
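
/* Sampling draws a fresh prandom_u32() per packet: a probability of
 * U32_MAX always runs the inner actions, 0 never does, and values in
 * between pass roughly probability / 2^32 of the packets.
 */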

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_ARG'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
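
/* A computed hash of zero is remapped to 0x1 above, presumably so that
 * zero can remain an unambiguous "no hash was computed" value in
 * key->ovs_flow_hash.
 */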

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	const struct nlattr *actions, *cpl_arg;
	const struct check_pkt_len_arg *arg;
	int rem = nla_len(attr);
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	if (skb->len <= arg->pkt_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}
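
/* execute_dec_ttl() reports an expired TTL or hop limit as -EHOSTUNREACH;
 * do_execute_actions() catches exactly that value and diverts the packet
 * to dec_ttl_exception_handler(), which runs the user-supplied exception
 * actions instead of treating it as a hard error.
 */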

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'; when the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}
Jesse Grossccb13522011-10-25 19:26:31 -07001275
William Tuf2a4d082016-06-10 11:49:33 -07001276 case OVS_ACTION_ATTR_TRUNC: {
1277 struct ovs_action_trunc *trunc = nla_data(a);
1278
1279 if (skb->len > trunc->max_len)
1280 OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
1281 break;
1282 }
1283
Jesse Grossccb13522011-10-25 19:26:31 -07001284 case OVS_ACTION_ATTR_USERSPACE:
William Tuf2a4d082016-06-10 11:49:33 -07001285 output_userspace(dp, skb, key, a, attr,
1286 len, OVS_CB(skb)->cutlen);
1287 OVS_CB(skb)->cutlen = 0;
Jesse Grossccb13522011-10-25 19:26:31 -07001288 break;
1289
Andy Zhou971427f32014-09-15 19:37:25 -07001290 case OVS_ACTION_ATTR_HASH:
1291 execute_hash(skb, key, a);
1292 break;
1293
Martin Varghesef66b53f2019-12-21 08:50:46 +05301294 case OVS_ACTION_ATTR_PUSH_MPLS: {
1295 struct ovs_action_push_mpls *mpls = nla_data(a);
Simon Horman25cd9ba2014-10-06 05:05:13 -07001296
Martin Varghesef66b53f2019-12-21 08:50:46 +05301297 err = push_mpls(skb, key, mpls->mpls_lse,
1298 mpls->mpls_ethertype, skb->mac_len);
1299 break;
1300 }
1301 case OVS_ACTION_ATTR_ADD_MPLS: {
1302 struct ovs_action_add_mpls *mpls = nla_data(a);
1303 __u16 mac_len = 0;
1304
1305 if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
1306 mac_len = skb->mac_len;
1307
1308 err = push_mpls(skb, key, mpls->mpls_lse,
1309 mpls->mpls_ethertype, mac_len);
1310 break;
1311 }
Simon Horman25cd9ba2014-10-06 05:05:13 -07001312 case OVS_ACTION_ATTR_POP_MPLS:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001313 err = pop_mpls(skb, key, nla_get_be16(a));
Simon Horman25cd9ba2014-10-06 05:05:13 -07001314 break;
1315
Jesse Grossccb13522011-10-25 19:26:31 -07001316 case OVS_ACTION_ATTR_PUSH_VLAN:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001317 err = push_vlan(skb, key, nla_data(a));
Jesse Grossccb13522011-10-25 19:26:31 -07001318 break;
1319
1320 case OVS_ACTION_ATTR_POP_VLAN:
Pravin B Shelarfff06c32014-11-06 06:55:14 -08001321 err = pop_vlan(skb, key);
Jesse Grossccb13522011-10-25 19:26:31 -07001322 break;
1323
andy zhoubef7f752017-03-20 16:32:30 -07001324 case OVS_ACTION_ATTR_RECIRC: {
1325 bool last = nla_is_last(a, rem);
1326
1327 err = execute_recirc(dp, skb, key, a, last);
1328 if (last) {
Andy Zhou971427f32014-09-15 19:37:25 -07001329 /* If this is the last action, the skb has
1330 * been consumed or freed.
1331 * Return immediately.
1332 */
1333 return err;
1334 }
1335 break;
andy zhoubef7f752017-03-20 16:32:30 -07001336 }

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}
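		/* Like recirc, sample() is built on clone_execute(): the
		 * nested action list runs on a clone of the packet (and,
		 * when needed, a clone of the flow key), so the sampled copy
		 * cannot corrupt the state seen by the remaining actions.
		 * The last-action case again transfers 'skb' ownership.
		 */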

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
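		/* The flow key may have been invalidated by an earlier action
		 * that rewrote packet headers, so it is refreshed before
		 * conntrack looks at it.  -EINPROGRESS means conntrack stole
		 * the skb for IP reassembly: success from the caller's point
		 * of view, but no packet is left to process.
		 */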

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
			break;
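		/* A nonzero return from ovs_meter_execute() means a meter
		 * band fired and the packet must be dropped.  consume_skb()
		 * is used rather than kfree_skb() because the drop is
		 * policy, not an error.
		 */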

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}
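		/* check_pkt_len compares the packet length against a
		 * threshold carried in the attribute and runs one of two
		 * nested action lists (greater / less-or-equal), again via
		 * the clone_execute() machinery, which is why it shares the
		 * same last-action ownership rule as clone and sample.
		 */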

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH) {
				err = dec_ttl_exception_handler(dp, skb, key,
								a, true);
				return err;
			}
			break;
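		/* execute_dec_ttl() returns -EHOSTUNREACH when the TTL (or
		 * IPv6 hop limit) would drop to zero.  The exception handler
		 * runs the nested actions attached to the attribute and takes
		 * ownership of 'skb', so the main loop must not touch it
		 * again.
		 */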
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on a clone of the packet.  Neither the original
 * 'skb' nor the original 'key' is affected by the execution.
 *
 * The execution may be deferred in case the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, so the 'key' can be used directly.
	 * Otherwise, try to clone a key from the next recursion level of
	 * 'flow_keys'.  If the clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space.  Defer the actions. */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per-CPU action FIFO space.  Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}
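/* Illustrative call shapes (not compiled, see the callers elsewhere in this
 * file): sample() reaches this function roughly as
 *
 *	clone_execute(dp, skb, key, 0, nested_actions, nested_len,
 *		      last, clone_flow_key);
 *
 * while recirculation passes no action list at all:
 *
 *	clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
 *
 * so 'actions == NULL' is what distinguishes the recirc path from the
 * sample/clone path throughout this function.
 */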

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
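/* Deferred work can itself defer more work: the do_execute_actions() call
 * above may append new entries while the loop drains the FIFO, which is why
 * the exit condition is re-checked after every entry instead of snapshotting
 * the tail once.
 */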

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}
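/* The per-CPU exec_actions_level counter bounds both recursion and
 * deferral: the first few levels execute inline using the per-CPU
 * 'flow_keys' stack (OVS_DEFERRED_ACTION_THRESHOLD entries), deeper levels
 * fall back to the deferred-action FIFO, and anything past
 * OVS_RECURSION_LIMIT is treated as a configuration error and dropped.
 * Only the outermost invocation (level == 1) drains the FIFO, so deferred
 * packets always run after the triggering packet finishes.
 */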

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}
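/* action_fifos_init()/action_fifos_exit() pair up the two per-CPU
 * allocations; in current trees they are invoked from the module init and
 * exit paths in datapath.c, so the buffers exist for the whole lifetime of
 * the openvswitch module.
 */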

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}