// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

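/*
 * A veth device is one end of a virtual Ethernet pair: every frame
 * transmitted on one peer is received on the other.  Pairs are typically
 * created from userspace, e.g. with iproute2 (the interface names here
 * are just an example):
 *
 *	ip link add veth0 type veth peer name veth1
 */
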
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16

struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "rx_queue_%u_%.18s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < dev->real_num_tx_queues; i++) {
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "tx_queue_%u_%.18s",
					 i, veth_tq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}

	if (!peer)
		return;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
	}
}

static void veth_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	channels->tx_count = dev->real_num_tx_queues;
	channels->rx_count = dev->real_num_rx_queues;
	channels->max_tx = dev->real_num_tx_queues;
	channels->max_rx = dev->real_num_rx_queues;
	channels->combined_count = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
	channels->max_combined = min(dev->real_num_rx_queues, dev->real_num_tx_queues);
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_channels		= veth_get_channels,
};

/* general routines */

static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}
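
/*
 * The xdp_ring of a veth_rq carries two kinds of entries: plain sk_buff
 * pointers queued by the peer's ndo_start_xmit path, and xdp_frame
 * pointers queued by the peer's ndo_xdp_xmit path.  Since both structures
 * are at least word-aligned, the low pointer bit is always zero and can
 * be borrowed as a type tag (VETH_XDP_FLAG): veth_xdp_to_ptr() sets it
 * when an xdp_frame is enqueued, and the helpers above mask it off again
 * when consuming entries.
 */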

static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}
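
/*
 * Producer/consumer wakeup protocol: a producer that has just enqueued to
 * xdp_ring calls __veth_xdp_flush(), which schedules NAPI unless
 * rx_notify_masked indicates the poller is already running.  The smp_mb()
 * above pairs with the smp_store_mb() in veth_poll(), so either the
 * poller sees the new ring entry or the producer sees
 * rx_notify_masked == false and schedules NAPI; this rules out a lost
 * wakeup with an entry stranded in the ring.
 */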

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool use_napi = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];

		/* The napi pointer is available when an XDP program is
		 * attached or when GRO is enabled
		 */
		use_napi = rcu_access_pointer(rq->napi);
		skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
		if (!use_napi)
			dev_lstats_add(dev, length);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (use_napi)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}
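
/*
 * Transmit on a veth device is receive on its peer: veth_forward_skb()
 * either queues the skb on the peer rq's xdp_ring (when NAPI is active)
 * or hands it straight to netif_rx().  In the NAPI case the tx byte and
 * packet accounting is skipped here, because the peer's poller counts the
 * traffic in its rx stats and veth_get_stats64() below folds those back
 * into this device's tx counters.
 */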

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
	struct veth_priv *priv = netdev_priv(dev);

	dev_lstats_read(dev, packets, bytes);
	return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}

static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;
	u64 packets, bytes;

	tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
	tot->tx_bytes = bytes;
	tot->tx_packets = packets;

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		veth_stats_tx(peer, &packets, &bytes);
		tot->rx_bytes += bytes;
		tot->rx_packets += packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}
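
/*
 * Per-queue counters are updated under u64_stats_update_begin()/_end()
 * and read with the fetch_begin/retry loop above, so 64-bit values stay
 * consistent on 32-bit hosts without extra locking.  Note the
 * cross-wiring: "rx" counters collected on the peer are reported as this
 * device's tx counters and vice versa, because every frame sent here is
 * received there.
 */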

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}

static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, nxmit = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
	 * side. This means an XDP program is loaded on the peer and the peer
	 * device is up.
	 */
	if (!rcu_access_pointer(rq->xdp_prog))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
			break;
		nxmit++;
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = nxmit;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}
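
/*
 * On success veth_xdp_xmit() returns the number of frames actually queued
 * (nxmit); it stops at the first frame that is oversized or no longer
 * fits in the ring and leaves the remaining frames untouched, so the
 * caller owns (and must free) everything past the returned count.  The
 * internal caller, veth_xdp_flush_bq() below, does exactly that with
 * xdp_return_frame().
 */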

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}

static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0, drops;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
	}

	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame(bq->q[i]);

	drops = bq->count - sent;
	trace_xdp_bulk_tx(rq->dev, sent, drops, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += drops;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}

static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
					  struct xdp_frame *frame,
					  struct veth_xdp_tx_bq *bq,
					  struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, &xdp);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(&xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return frame;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}
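
/*
 * For XDP_TX and XDP_REDIRECT the frame's original memory-model info is
 * copied into xdp.rxq->mem so that whoever completes (or fails) the
 * transmit can return the buffer to the allocator that owns it.  The
 * stack copy orig_frame exists because the xdp_frame header lives in the
 * packet headroom and may be overwritten once the buffer is handed to
 * veth_xdp_tx() or xdp_do_redirect(); on failure the saved copy is what
 * gets passed to xdp_return_frame().
 */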

/* frames array contains VETH_XDP_BATCH at most */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
				  int n_xdpf, struct veth_xdp_tx_bq *bq,
				  struct veth_stats *stats)
{
	void *skbs[VETH_XDP_BATCH];
	int i;

	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
			       GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n_xdpf; i++)
			xdp_return_frame(frames[i]);
		stats->rx_drops += n_xdpf;

		return;
	}

	for (i = 0; i < n_xdpf; i++) {
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(frames[i], skb,
						 rq->dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			stats->rx_drops++;
			continue;
		}
		napi_gro_receive(&rq->xdp_napi, skb);
	}
}
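
/*
 * Allocating the sk_buffs for a whole batch with xdp_alloc_skb_bulk()
 * amortizes the allocator cost over up to VETH_XDP_BATCH frames; the bulk
 * allocation either fully succeeds or the entire batch is dropped and its
 * frames returned, which keeps the error path simple.
 */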

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	u32 pktlen, headroom, act, metalen, frame_sz;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_orphan_partial(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
				      skb->len, PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	/* SKB "head" area always has tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);

	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}
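
/*
 * Before an skb can be run through the XDP program it must look like an
 * XDP buffer: linear, not shared, and with XDP_PACKET_HEADROOM of
 * writable headroom.  skbs that do not qualify are copied into a freshly
 * allocated page above, which caps the usable packet size at roughly
 * PAGE_SIZE minus the headroom and the trailing skb_shared_info.
 */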

static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0, n_xdpf = 0;
	void *xdpf[VETH_XDP_BATCH];

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			/* ndo_xdp_xmit */
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += frame->len;
			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
			if (frame) {
				/* XDP_PASS */
				xdpf[n_xdpf++] = frame;
				if (n_xdpf == VETH_XDP_BATCH) {
					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
							      bq, stats);
					n_xdpf = 0;
				}
			}
		} else {
			/* ndo_start_xmit */
			struct sk_buff *skb = ptr;

			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
			if (skb)
				napi_gro_receive(&rq->xdp_napi, skb);
		}
		done++;
	}

	if (n_xdpf)
		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	if (stats.xdp_redirect > 0)
		xdp_do_flush();
	xdp_clear_return_frame_no_direct();

	return done;
}
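
/*
 * The re-check after napi_complete_done() closes the race with producers:
 * once rx_notify_masked is cleared (with a full barrier via
 * smp_store_mb(), pairing with the smp_mb() in __veth_xdp_flush()), the
 * ring is inspected one more time and NAPI is rescheduled if an entry
 * slipped in while the poller was completing.
 */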

static int __veth_napi_enable(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;
err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rcu_assign_pointer(priv->rq[i].napi, NULL);
		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

static bool veth_gro_requested(const struct net_device *dev)
{
	return !!(dev->wanted_features & NETIF_F_GRO);
}

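/*
 * NAPI on a veth queue has two independent requesters: an attached XDP
 * program and user-enabled GRO.  veth_enable_xdp() below only sets up the
 * NAPI machinery when GRO has not already done so, and veth_disable_xdp()
 * leaves it running if the device is up and GRO is still requested; the
 * rq->napi pointer doubles as the "NAPI is active" flag that veth_xmit()
 * consults.
 */
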
static int veth_enable_xdp(struct net_device *dev)
{
	bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			if (!napi_already_on)
				netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		if (!napi_already_on) {
			err = __veth_napi_enable(dev);
			if (err)
				goto err_rxq_reg;

			if (!veth_gro_requested(dev)) {
				/* user-space did not require GRO, but adding
				 * XDP is supposed to get GRO working
				 */
				dev->features |= NETIF_F_GRO;
				netdev_features_change(dev);
			}
		}
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		if (!napi_already_on)
			netif_napi_del(&rq->xdp_napi);
	}

	return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);

	if (!netif_running(dev) || !veth_gro_requested(dev)) {
		veth_napi_del(dev);

		/* if user-space did not request GRO, it was only enabled
		 * when XDP was attached; clear it again now
		 */
		if (!veth_gro_requested(dev) && netif_running(dev)) {
			dev->features &= ~NETIF_F_GRO;
			netdev_features_change(dev);
		}
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}

static int veth_napi_enable(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
	}

	err = __veth_napi_enable(dev);
	if (err) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_del(&rq->xdp_napi);
		}
		return err;
	}
	return err;
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	} else if (veth_gro_requested(dev)) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);
	else if (veth_gro_requested(dev))
		veth_napi_del(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one.
	 * Since veth rx/tx has nothing to do with disabling irqs, we are
	 * guaranteed never to have pending data when we poll for it, so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}
	if (priv->_xdp_prog)
		features |= NETIF_F_GRO;

	return features;
}

static int veth_set_features(struct net_device *dev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	struct veth_priv *priv = netdev_priv(dev);
	int err;

	if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
		return 0;

	if (features & NETIF_F_GRO) {
		err = veth_napi_enable(dev);
		if (err)
			return err;
	} else {
		veth_napi_del(dev);
	}
	return 0;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001265static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1266 struct netlink_ext_ack *extack)
1267{
1268 struct veth_priv *priv = netdev_priv(dev);
1269 struct bpf_prog *old_prog;
1270 struct net_device *peer;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001271 unsigned int max_mtu;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001272 int err;
1273
1274 old_prog = priv->_xdp_prog;
1275 priv->_xdp_prog = prog;
1276 peer = rtnl_dereference(priv->peer);
1277
1278 if (prog) {
1279 if (!peer) {
1280 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1281 err = -ENOTCONN;
1282 goto err;
1283 }
1284
Toshiaki Makitadc224822018-08-03 16:58:11 +09001285 max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
1286 peer->hard_header_len -
1287 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1288 if (peer->mtu > max_mtu) {
1289 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1290 err = -ERANGE;
1291 goto err;
1292 }
1293
Toshiaki Makita638264d2018-08-03 16:58:18 +09001294 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1295 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1296 err = -ENOSPC;
1297 goto err;
1298 }
1299
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001300 if (dev->flags & IFF_UP) {
1301 err = veth_enable_xdp(dev);
1302 if (err) {
1303 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1304 goto err;
1305 }
1306 }
Toshiaki Makitadc224822018-08-03 16:58:11 +09001307
1308 if (!old_prog) {
1309 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1310 peer->max_mtu = max_mtu;
1311 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001312 }
1313
1314 if (old_prog) {
Toshiaki Makitadc224822018-08-03 16:58:11 +09001315 if (!prog) {
1316 if (dev->flags & IFF_UP)
1317 veth_disable_xdp(dev);
1318
1319 if (peer) {
1320 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1321 peer->max_mtu = ETH_MAX_MTU;
1322 }
1323 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001324 bpf_prog_put(old_prog);
1325 }
1326
Toshiaki Makitadc224822018-08-03 16:58:11 +09001327 if ((!!old_prog ^ !!prog) && peer)
1328 netdev_update_features(peer);
1329
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001330 return 0;
1331err:
1332 priv->_xdp_prog = old_prog;
1333
1334 return err;
1335}
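
/* Worked example of the max_mtu bound above (illustrative; the numbers
 * are config- and arch-dependent): with 4 KiB pages, XDP_PACKET_HEADROOM
 * of 256, NET_IP_ALIGN of 0, a 14-byte Ethernet header and
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of, say, 320 bytes,
 * the peer's MTU may not exceed
 *
 *	4096 - 256 - 14 - 320 = 3506
 *
 * so that the XDP headroom, the frame and the shared-info tail all fit
 * in one page.
 */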
1336
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001337static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1338{
1339 switch (xdp->command) {
1340 case XDP_SETUP_PROG:
1341 return veth_xdp_set(dev, xdp->prog, xdp->extack);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001342 default:
1343 return -EINVAL;
1344 }
1345}
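
/* A userspace attach sketch with a recent libbpf (illustrative;
 * the object path and taking the object's first program are
 * assumptions). bpf_xdp_attach() triggers the XDP_SETUP_PROG command
 * dispatched above:
 *
 *	#include <bpf/libbpf.h>
 *	#include <net/if.h>
 *
 *	int attach_xdp(const char *path, const char *ifname)
 *	{
 *		struct bpf_object *obj = bpf_object__open_file(path, NULL);
 *		int ifindex = if_nametoindex(ifname);
 *
 *		if (!obj || !ifindex || bpf_object__load(obj))
 *			return -1;
 *		// Flags 0 lets the kernel pick native mode, which veth
 *		// supports; the request ends up in veth_xdp_set().
 *		return bpf_xdp_attach(ifindex,
 *				      bpf_program__fd(bpf_object__next_program(obj, NULL)),
 *				      0, NULL);
 *	}
 */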
1346
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001347static const struct net_device_ops veth_netdev_ops = {
Daniel Lezcanoee923622009-02-22 00:04:45 -08001348 .ndo_init = veth_dev_init,
1349 .ndo_open = veth_open,
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001350 .ndo_stop = veth_close,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001351 .ndo_start_xmit = veth_xmit,
stephen hemminger6311cc42011-06-08 14:53:59 +00001352 .ndo_get_stats64 = veth_get_stats64,
Gao feng5c70ef82013-10-04 16:52:24 +08001353 .ndo_set_rx_mode = veth_set_multicast_list,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001354 .ndo_set_mac_address = eth_mac_addr,
WANG Congbb446c12014-06-23 15:36:02 -07001355#ifdef CONFIG_NET_POLL_CONTROLLER
1356 .ndo_poll_controller = veth_poll_controller,
1357#endif
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001358 .ndo_get_iflink = veth_get_iflink,
Toshiaki Makitadc224822018-08-03 16:58:11 +09001359 .ndo_fix_features = veth_fix_features,
Paolo Abenid3256ef2021-04-09 13:04:38 +02001360 .ndo_set_features = veth_set_features,
Toshiaki Makita1a04a822015-07-31 15:03:25 +09001361 .ndo_features_check = passthru_features_check,
Paolo Abeni163e5292016-02-26 10:45:41 +01001362 .ndo_set_rx_headroom = veth_set_rx_headroom,
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001363 .ndo_bpf = veth_xdp,
Lorenzo Bianconi9152cff2020-03-19 17:41:28 +01001364 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
Daniel Borkmann9aa12062020-10-11 01:40:02 +02001365 .ndo_get_peer_dev = veth_peer_dev,
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001366};
1367
Alexander Duyck732912d72016-04-19 14:02:26 -04001368#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
Xin Longc80fafb2016-08-25 13:21:49 +08001369 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
Alexander Duyck732912d72016-04-19 14:02:26 -04001370 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
Patrick McHardy28d2b132013-04-19 02:04:32 +00001371 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1372 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX)
Eric Dumazet80933152012-12-29 16:26:10 +00001373
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001374static void veth_setup(struct net_device *dev)
1375{
1376 ether_setup(dev);
1377
Neil Horman550fd082011-07-26 06:05:38 +00001378 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
Hannes Frederic Sowa23ea5a92012-10-30 16:22:01 +00001379 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
Phil Sutter02f01ec2015-08-18 10:30:29 +02001380 dev->priv_flags |= IFF_NO_QUEUE;
Paolo Abeni163e5292016-02-26 10:45:41 +01001381 dev->priv_flags |= IFF_PHONY_HEADROOM;
Neil Horman550fd082011-07-26 06:05:38 +00001382
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001383 dev->netdev_ops = &veth_netdev_ops;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001384 dev->ethtool_ops = &veth_ethtool_ops;
1385 dev->features |= NETIF_F_LLTX;
Eric Dumazet80933152012-12-29 16:26:10 +00001386 dev->features |= VETH_FEATURES;
Toshiaki Makita8d0d21f2014-02-18 21:20:08 +09001387 dev->vlan_features = dev->features &
Vlad Yasevich3f8c7072014-03-27 22:14:48 -04001388 ~(NETIF_F_HW_VLAN_CTAG_TX |
1389 NETIF_F_HW_VLAN_STAG_TX |
1390 NETIF_F_HW_VLAN_CTAG_RX |
1391 NETIF_F_HW_VLAN_STAG_RX);
David S. Millercf124db2017-05-08 12:52:56 -04001392 dev->needs_free_netdev = true;
1393 dev->priv_destructor = veth_dev_free;
Jarod Wilson91572082016-10-20 13:55:20 -04001394 dev->max_mtu = ETH_MAX_MTU;
Michał Mirosława2c725f2011-03-31 01:01:35 +00001395
Eric Dumazet80933152012-12-29 16:26:10 +00001396 dev->hw_features = VETH_FEATURES;
Eric Dumazet82d81892013-10-25 18:25:03 -07001397 dev->hw_enc_features = VETH_FEATURES;
David Ahern607fca92016-08-24 20:10:45 -07001398 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001399}
1400
1401/*
1402 * netlink interface
1403 */
1404
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02001405static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1406 struct netlink_ext_ack *extack)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001407{
1408 if (tb[IFLA_ADDRESS]) {
1409 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1410 return -EINVAL;
1411 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1412 return -EADDRNOTAVAIL;
1413 }
Eric Biederman38d40812009-03-03 23:36:04 -08001414 if (tb[IFLA_MTU]) {
1415 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1416 return -EINVAL;
1417 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001418 return 0;
1419}
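
/* For instance (illustrative): an IFLA_ADDRESS shorter than ETH_ALEN
 * fails with -EINVAL, a multicast MAC fails with -EADDRNOTAVAIL, and an
 * out-of-range IFLA_MTU is rejected via is_valid_veth_mtu() with
 * -EINVAL.
 */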
1420
1421static struct rtnl_link_ops veth_link_ops;
1422
Paolo Abenid3256ef2021-04-09 13:04:38 +02001423static void veth_disable_gro(struct net_device *dev)
1424{
1425 dev->features &= ~NETIF_F_GRO;
1426 dev->wanted_features &= ~NETIF_F_GRO;
1427 netdev_update_features(dev);
1428}
1429
Eric W. Biederman81adee42009-11-08 00:53:51 -08001430static int veth_newlink(struct net *src_net, struct net_device *dev,
Matthias Schiffer7a3f4a12017-06-25 23:55:59 +02001431 struct nlattr *tb[], struct nlattr *data[],
1432 struct netlink_ext_ack *extack)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001433{
Toshiaki Makita7797b932018-08-15 17:07:29 +09001434 int err;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001435 struct net_device *peer;
1436 struct veth_priv *priv;
1437 char ifname[IFNAMSIZ];
1438 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
Tom Gundersen55177502014-07-14 16:37:25 +02001439 unsigned char name_assign_type;
Patrick McHardy3729d502010-02-26 06:34:54 +00001440 struct ifinfomsg *ifmp;
Eric W. Biederman81adee42009-11-08 00:53:51 -08001441 struct net *net;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001442
1443 /*
1444 * create and register peer first
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001445 */
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001446 if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1447 struct nlattr *nla_peer;
1448
1449 nla_peer = data[VETH_INFO_PEER];
Patrick McHardy3729d502010-02-26 06:34:54 +00001450 ifmp = nla_data(nla_peer);
Jiri Pirkof7b12602014-02-18 20:53:18 +01001451 err = rtnl_nla_parse_ifla(peer_tb,
1452 nla_data(nla_peer) + sizeof(struct ifinfomsg),
Johannes Bergfceb6432017-04-12 14:34:07 +02001453 nla_len(nla_peer) - sizeof(struct ifinfomsg),
1454 NULL);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001455 if (err < 0)
1456 return err;
1457
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02001458 err = veth_validate(peer_tb, NULL, extack);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001459 if (err < 0)
1460 return err;
1461
1462 tbp = peer_tb;
Patrick McHardy3729d502010-02-26 06:34:54 +00001463 } else {
1464 ifmp = NULL;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001465 tbp = tb;
Patrick McHardy3729d502010-02-26 06:34:54 +00001466 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001467
Serhey Popovych191cdb32017-06-21 12:12:24 +03001468 if (ifmp && tbp[IFLA_IFNAME]) {
Francis Laniel872f6902020-11-15 18:08:06 +01001469 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
Tom Gundersen55177502014-07-14 16:37:25 +02001470 name_assign_type = NET_NAME_USER;
1471 } else {
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001472 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
Tom Gundersen55177502014-07-14 16:37:25 +02001473 name_assign_type = NET_NAME_ENUM;
1474 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001475
Eric W. Biederman81adee42009-11-08 00:53:51 -08001476 net = rtnl_link_get_net(src_net, tbp);
1477 if (IS_ERR(net))
1478 return PTR_ERR(net);
1479
Tom Gundersen55177502014-07-14 16:37:25 +02001480 peer = rtnl_create_link(net, ifname, name_assign_type,
David Ahernd0522f12018-11-06 12:51:14 -08001481 &veth_link_ops, tbp, extack);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001482 if (IS_ERR(peer)) {
1483 put_net(net);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001484 return PTR_ERR(peer);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001485 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001486
Serhey Popovych191cdb32017-06-21 12:12:24 +03001487 if (!ifmp || !tbp[IFLA_ADDRESS])
Danny Kukawkaf2cedb62012-02-15 06:45:39 +00001488 eth_hw_addr_random(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001489
Pavel Emelyanove6f8f1a2012-08-08 21:53:03 +00001490 if (ifmp && (dev->ifindex != 0))
1491 peer->ifindex = ifmp->ifi_index;
1492
Stephen Hemminger72d249552017-12-07 15:40:20 -08001493 peer->gso_max_size = dev->gso_max_size;
1494 peer->gso_max_segs = dev->gso_max_segs;
1495
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001496 err = register_netdevice(peer);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001497 put_net(net);
1498 net = NULL;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001499 if (err < 0)
1500 goto err_register_peer;
1501
Paolo Abenid3256ef2021-04-09 13:04:38 +02001502 /* keep GRO disabled by default to be consistent with the established
1503 * veth behavior
1504 */
1505 veth_disable_gro(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001506 netif_carrier_off(peer);
1507
Patrick McHardy3729d502010-02-26 06:34:54 +00001508 err = rtnl_configure_link(peer, ifmp);
1509 if (err < 0)
1510 goto err_configure_peer;
1511
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001512 /*
1513 * register dev last
1514 *
1515 * note that, since we've registered a new device, the dev's name
1516 * should be re-allocated
1517 */
1518
1519 if (tb[IFLA_ADDRESS] == NULL)
Danny Kukawkaf2cedb62012-02-15 06:45:39 +00001520 eth_hw_addr_random(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001521
Jiri Pirko6c8c4442011-04-30 01:28:17 +00001522 if (tb[IFLA_IFNAME])
Francis Laniel872f6902020-11-15 18:08:06 +01001523 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
Jiri Pirko6c8c4442011-04-30 01:28:17 +00001524 else
1525 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1526
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001527 err = register_netdevice(dev);
1528 if (err < 0)
1529 goto err_register_dev;
1530
1531 netif_carrier_off(dev);
1532
1533 /*
1534 * tie the devices together
1535 */
1536
1537 priv = netdev_priv(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001538 rcu_assign_pointer(priv->peer, peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001539
1540 priv = netdev_priv(peer);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001541 rcu_assign_pointer(priv->peer, dev);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001542
Paolo Abenid3256ef2021-04-09 13:04:38 +02001543 veth_disable_gro(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001544 return 0;
1545
1546err_register_dev:
1547 /* nothing to do */
Patrick McHardy3729d502010-02-26 06:34:54 +00001548err_configure_peer:
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001549 unregister_netdevice(peer);
1550 return err;
1551
1552err_register_peer:
1553 free_netdev(peer);
1554 return err;
1555}
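
/* The attribute nesting parsed by veth_newlink() (an illustrative
 * layout; this is what "ip link add veth0 type veth peer name veth1"
 * sends):
 *
 *	RTM_NEWLINK
 *	  IFLA_IFNAME		"veth0"
 *	  IFLA_LINKINFO
 *	    IFLA_INFO_KIND	"veth"
 *	    IFLA_INFO_DATA
 *	      VETH_INFO_PEER	struct ifinfomsg, then nested IFLA_*
 *	        IFLA_IFNAME	"veth1"
 *
 * which is why the code above skips sizeof(struct ifinfomsg) inside
 * VETH_INFO_PEER before parsing the peer's own attributes.
 */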
1556
Eric Dumazet23289a32009-10-27 07:06:36 +00001557static void veth_dellink(struct net_device *dev, struct list_head *head)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001558{
1559 struct veth_priv *priv;
1560 struct net_device *peer;
1561
1562 priv = netdev_priv(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001563 peer = rtnl_dereference(priv->peer);
1564
1565 /* Note: dellink() is called from default_device_exit_batch(),
1566 * before a synchronize_rcu() point. The devices are guaranteed
1567 * not to be freed before one RCU grace period has elapsed.
1568 */
1569 RCU_INIT_POINTER(priv->peer, NULL);
Eric Dumazet24540532009-10-30 01:00:27 -07001570 unregister_netdevice_queue(dev, head);
Eric Dumazetf45a5c22013-02-08 20:10:49 +00001571
1572 if (peer) {
1573 priv = netdev_priv(peer);
1574 RCU_INIT_POINTER(priv->peer, NULL);
1575 unregister_netdevice_queue(peer, head);
1576 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001577}
1578
Thomas Graf23711432012-02-15 04:09:46 +00001579static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1580 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1581};
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001582
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +01001583static struct net *veth_get_link_net(const struct net_device *dev)
1584{
1585 struct veth_priv *priv = netdev_priv(dev);
1586 struct net_device *peer = rtnl_dereference(priv->peer);
1587
1588 return peer ? dev_net(peer) : dev_net(dev);
1589}
1590
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001591static struct rtnl_link_ops veth_link_ops = {
1592 .kind = DRV_NAME,
1593 .priv_size = sizeof(struct veth_priv),
1594 .setup = veth_setup,
1595 .validate = veth_validate,
1596 .newlink = veth_newlink,
1597 .dellink = veth_dellink,
1598 .policy = veth_policy,
1599 .maxtype = VETH_INFO_MAX,
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +01001600 .get_link_net = veth_get_link_net,
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001601};
1602
1603/*
1604 * init/fini
1605 */
1606
1607static __init int veth_init(void)
1608{
1609 return rtnl_link_register(&veth_link_ops);
1610}
1611
1612static __exit void veth_exit(void)
1613{
Patrick McHardy68365452008-01-20 17:25:14 -08001614 rtnl_link_unregister(&veth_link_ops);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001615}
1616
1617module_init(veth_init);
1618module_exit(veth_exit);
1619
1620MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1621MODULE_LICENSE("GPL v2");
1622MODULE_ALIAS_RTNL_LINK(DRV_NAME);