/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H

#include <net/ip_tunnels.h>
#include <net/udp.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ipv6_stubs.h>
#endif

struct udp_port_cfg {
        u8                      family;

        /* Used only for kernel-created sockets */
        union {
                struct in_addr          local_ip;
#if IS_ENABLED(CONFIG_IPV6)
                struct in6_addr         local_ip6;
#endif
        };

        union {
                struct in_addr          peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
                struct in6_addr         peer_ip6;
#endif
        };

        __be16                  local_udp_port;
        __be16                  peer_udp_port;
        int                     bind_ifindex;
        unsigned int            use_udp_checksums:1,
                                use_udp6_tx_checksums:1,
                                use_udp6_rx_checksums:1,
                                ipv6_v6only:1;
};

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
                     struct socket **sockp);

#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
                     struct socket **sockp);
#else
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
                                   struct socket **sockp)
{
        return 0;
}
#endif

static inline int udp_sock_create(struct net *net,
                                  struct udp_port_cfg *cfg,
                                  struct socket **sockp)
{
        if (cfg->family == AF_INET)
                return udp_sock_create4(net, cfg, sockp);

        if (cfg->family == AF_INET6)
                return udp_sock_create6(net, cfg, sockp);

        return -EPFNOSUPPORT;
}

typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
                                             struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
                                                    struct list_head *head,
                                                    struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
                                         int nhoff);

struct udp_tunnel_sock_cfg {
        void *sk_user_data;     /* user data used by the encap_rcv callback */
        /* Used for setting up udp_sock fields, see udp.h for details */
        __u8 encap_type;
        udp_tunnel_encap_rcv_t encap_rcv;
        udp_tunnel_encap_err_lookup_t encap_err_lookup;
        udp_tunnel_encap_destroy_t encap_destroy;
        udp_tunnel_gro_receive_t gro_receive;
        udp_tunnel_gro_complete_t gro_complete;
};

/* Set up the given (UDP) sock to receive UDP encapsulated packets */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
                           struct udp_tunnel_sock_cfg *sock_cfg);

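/*
 * Example: a sketch of attaching an encapsulation receive handler to a
 * socket obtained from udp_sock_create(). foo_udp_encap_recv() is a
 * hypothetical callback matching udp_tunnel_encap_rcv_t, and the encap_type
 * value shown is only a placeholder for whatever UDP_ENCAP_* mode (see
 * udp.h) the protocol requires.
 *
 *        static void foo_setup_sock(struct net *net, struct socket *sock,
 *                                   void *priv)
 *        {
 *                struct udp_tunnel_sock_cfg cfg = {
 *                        .sk_user_data = priv,
 *                        .encap_type   = 1,
 *                        .encap_rcv    = foo_udp_encap_recv,
 *                };
 *
 *                setup_udp_tunnel_sock(net, sock, &cfg);
 *        }
 */
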
/* -- List of parsable UDP tunnel types --
 *
 * Adding to this list will result in serious debate. The main issue is
 * that this list is essentially a list of workarounds for either poorly
 * designed tunnels, or poorly designed device offloads.
 *
 * The parsing supported via these types should really be used for Rx
 * traffic only as the network stack will have already inserted offsets for
 * the location of the headers in the skb. In addition any ports that are
 * pushed should be kept within the namespace without leaking to other
 * devices such as VFs or other ports on the same device.
 *
 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
 * need to use this for Rx checksum offload. It should not be necessary to
 * call this function to perform Tx offloads on outgoing traffic.
 */
enum udp_parsable_tunnel_type {
        UDP_TUNNEL_TYPE_VXLAN     = BIT(0), /* RFC 7348 */
        UDP_TUNNEL_TYPE_GENEVE    = BIT(1), /* draft-ietf-nvo3-geneve */
        UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
};

struct udp_tunnel_info {
        unsigned short type;
        sa_family_t sa_family;
        __be16 port;
        u8 hw_priv;
};

/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
                             unsigned short type);
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
                             unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);

static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
        ASSERT_RTNL();
        call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}

static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
        ASSERT_RTNL();
        call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}

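/*
 * Example: tunnel drivers typically advertise a newly opened socket's port
 * to capable NICs and withdraw it again on close, while NIC drivers call
 * udp_tunnel_get_rx_info() (under rtnl) when they become ready to have the
 * current port list replayed. The foo_* wrappers below are hypothetical.
 *
 *        static void foo_sock_added(struct socket *sock)
 *        {
 *                udp_tunnel_notify_add_rx_port(sock, UDP_TUNNEL_TYPE_VXLAN);
 *        }
 *
 *        static void foo_sock_released(struct socket *sock)
 *        {
 *                udp_tunnel_notify_del_rx_port(sock, UDP_TUNNEL_TYPE_VXLAN);
 *        }
 */
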
/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                         __be32 src, __be32 dst, __u8 tos, __u8 ttl,
                         __be16 df, __be16 src_port, __be16 dst_port,
                         bool xnet, bool nocheck);

int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                         struct sk_buff *skb,
                         struct net_device *dev, struct in6_addr *saddr,
                         struct in6_addr *daddr,
                         __u8 prio, __u8 ttl, __be32 label,
                         __be16 src_port, __be16 dst_port, bool nocheck);

void udp_tunnel_sock_release(struct socket *sock);

struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
                                    __be16 flags, __be64 tunnel_id,
                                    int md_size);

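/*
 * Example: inside an encap_rcv handler, a collect-metadata tunnel can turn
 * the outer headers into a metadata dst before handing the inner packet up
 * the stack. This fragment is only a sketch; vni_to_tunnel_id() stands in
 * for whatever per-protocol ID conversion the driver uses.
 *
 *        struct metadata_dst *tun_dst;
 *
 *        tun_dst = udp_tun_rx_dst(skb, AF_INET, TUNNEL_KEY,
 *                                 vni_to_tunnel_id(vni), 0);
 *        if (!tun_dst)
 *                goto drop;
 *        skb_dst_set(skb, &tun_dst->dst);
 */
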
#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
        int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

        return iptunnel_handle_offloads(skb, type);
}
#endif

static inline void udp_tunnel_encap_enable(struct socket *sock)
{
        struct udp_sock *up = udp_sk(sock->sk);

        if (up->encap_enabled)
                return;

        up->encap_enabled = 1;
#if IS_ENABLED(CONFIG_IPV6)
        if (sock->sk->sk_family == PF_INET6)
                ipv6_stub->udpv6_encap_enable();
        else
#endif
                udp_encap_enable();
}

#define UDP_TUNNEL_NIC_MAX_TABLES       4

enum udp_tunnel_nic_info_flags {
        /* Device callbacks may sleep */
        UDP_TUNNEL_NIC_INFO_MAY_SLEEP         = BIT(0),
        /* Device only supports offloads when it's open, all ports
         * will be removed before close and re-added after open.
         */
        UDP_TUNNEL_NIC_INFO_OPEN_ONLY         = BIT(1),
        /* Device supports only IPv4 tunnels */
        UDP_TUNNEL_NIC_INFO_IPV4_ONLY         = BIT(2),
        /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
         * This port must not be counted towards n_entries of any table.
         * Driver will not receive any callback associated with port 4789.
         */
        UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
};

struct udp_tunnel_nic;

#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES      (U16_MAX / 2)

struct udp_tunnel_nic_shared {
        struct udp_tunnel_nic *udp_tunnel_nic_info;

        struct list_head devices;
};

struct udp_tunnel_nic_shared_node {
        struct net_device *dev;
        struct list_head list;
};

/**
 * struct udp_tunnel_nic_info - driver UDP tunnel offload information
 * @set_port:   callback for adding a new port
 * @unset_port: callback for removing a port
 * @sync_table: callback for syncing the entire port table at once
 * @shared:     reference to device global state (optional)
 * @flags:      device flags from enum udp_tunnel_nic_info_flags
 * @tables:     UDP port tables this device has
 * @tables.n_entries:    number of entries in this table
 * @tables.tunnel_types: types of tunnels this table accepts
 *
 * Drivers are expected to provide either @set_port and @unset_port callbacks
 * or the @sync_table callback. Callbacks are invoked with rtnl lock held.
 *
 * Devices which (misguidedly) share the UDP tunnel port table across multiple
 * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
 * point @shared at it.
 * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
 * sharing a table.
 *
 * Known limitations:
 *  - UDP tunnel port notifications are fundamentally best-effort -
 *    the driver will likely see both skbs which use a UDP tunnel port
 *    without being tunnel traffic, and tunnel skbs arriving on other ports -
 *    drivers should only use these ports for non-critical RX-side offloads,
 *    e.g. the checksum offload;
 *  - none of the devices care about the socket family at present, so we don't
 *    track it. Please extend this code if you care.
 */
struct udp_tunnel_nic_info {
        /* one-by-one */
        int (*set_port)(struct net_device *dev,
                        unsigned int table, unsigned int entry,
                        struct udp_tunnel_info *ti);
        int (*unset_port)(struct net_device *dev,
                          unsigned int table, unsigned int entry,
                          struct udp_tunnel_info *ti);

        /* all at once */
        int (*sync_table)(struct net_device *dev, unsigned int table);

        struct udp_tunnel_nic_shared *shared;

        unsigned int flags;

        struct udp_tunnel_nic_table_info {
                unsigned int n_entries;
                unsigned int tunnel_types;
        } tables[UDP_TUNNEL_NIC_MAX_TABLES];
};
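
/*
 * Example: a NIC driver with a single four-entry table accepting VXLAN and
 * GENEVE ports might describe itself as below and point
 * netdev->udp_tunnel_nic_info at the structure during probe. The foo_*
 * callbacks are hypothetical driver functions.
 *
 *        static const struct udp_tunnel_nic_info foo_udp_tunnels = {
 *                .set_port   = foo_udp_tunnel_set_port,
 *                .unset_port = foo_udp_tunnel_unset_port,
 *                .flags      = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
 *                .tables     = {
 *                        {
 *                                .n_entries    = 4,
 *                                .tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
 *                                                UDP_TUNNEL_TYPE_GENEVE,
 *                        },
 *                },
 *        };
 */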

/* UDP tunnel module dependencies
 *
 * Tunnel drivers are expected to have a hard dependency on the udp_tunnel
 * module. NIC drivers are not, they just attach their
 * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
 * Loading a tunnel driver will cause the udp_tunnel module to be loaded
 * and only then will all the required state structures be allocated.
 * Since we want a weak dependency from the drivers and the core to udp_tunnel
 * we call things through the following stubs.
 */
struct udp_tunnel_nic_ops {
        void (*get_port)(struct net_device *dev, unsigned int table,
                         unsigned int idx, struct udp_tunnel_info *ti);
        void (*set_port_priv)(struct net_device *dev, unsigned int table,
                              unsigned int idx, u8 priv);
        void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
        void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
        void (*reset_ntf)(struct net_device *dev);

        size_t (*dump_size)(struct net_device *dev, unsigned int table);
        int (*dump_write)(struct net_device *dev, unsigned int table,
                          struct sk_buff *skb);
};

#ifdef CONFIG_INET
extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
#else
#define udp_tunnel_nic_ops      ((struct udp_tunnel_nic_ops *)NULL)
#endif

static inline void
udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
                        unsigned int idx, struct udp_tunnel_info *ti)
{
        /* This helper is used from .sync_table; we indicate empty entries
         * by a zeroed @ti. Drivers which need to know the details of a port
         * when it gets deleted should use the .set_port / .unset_port
         * callbacks.
         * Zero out here, otherwise !CONFIG_INET causes uninitialized warnings.
         */
        memset(ti, 0, sizeof(*ti));

        if (udp_tunnel_nic_ops)
                udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
}

static inline void
udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
                             unsigned int idx, u8 priv)
{
        if (udp_tunnel_nic_ops)
                udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
}

static inline void
udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
        if (udp_tunnel_nic_ops)
                udp_tunnel_nic_ops->add_port(dev, ti);
}

static inline void
udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
        if (udp_tunnel_nic_ops)
                udp_tunnel_nic_ops->del_port(dev, ti);
}

/**
 * udp_tunnel_nic_reset_ntf() - device-originating reset notification
 * @dev: network interface device structure
 *
 * Called by the driver to inform the core that the entire UDP tunnel port
 * state has been lost, usually due to device reset. Core will assume device
 * forgot all the ports and issue .set_port and .sync_table callbacks as
 * necessary.
 *
 * This function must be called with rtnl lock held, and will issue all
 * the callbacks before returning.
 */
static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
        if (udp_tunnel_nic_ops)
                udp_tunnel_nic_ops->reset_ntf(dev);
}
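
/*
 * Example: a driver's reset/re-init handler, already running under rtnl,
 * would typically replay the lost port state as its last step:
 *
 *        udp_tunnel_nic_reset_ntf(netdev);
 */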

static inline size_t
udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
        if (!udp_tunnel_nic_ops)
                return 0;
        return udp_tunnel_nic_ops->dump_size(dev, table);
}

static inline int
udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
                          struct sk_buff *skb)
{
        if (!udp_tunnel_nic_ops)
                return 0;
        return udp_tunnel_nic_ops->dump_write(dev, table, skb);
}
#endif /* __NET_UDP_TUNNEL_H */