// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16

struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)
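
/* The two stat tables above drive generic dump loops: each entry pairs
 * an ethtool string with an offsetof() into struct veth_stats, so adding
 * a counter only takes a new struct field plus one table line.  A
 * minimal sketch of the consuming side (simplified from
 * veth_get_ethtool_stats() below; the u64_stats retry loop is omitted):
 *
 *	const void *base = (void *)&rq->stats.vs;
 *
 *	for (j = 0; j < VETH_RQ_STATS_LEN; j++)
 *		data[idx + j] = *(u64 *)(base + veth_rq_stats_desc[j].offset);
 */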

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "rx_queue_%u_%.18s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < dev->real_num_tx_queues; i++) {
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "tx_queue_%u_%.18s",
					 i, veth_tq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}

	if (!peer)
		return;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
	}
}

static void veth_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	channels->tx_count = dev->real_num_tx_queues;
	channels->rx_count = dev->real_num_rx_queues;
	channels->max_tx = dev->num_tx_queues;
	channels->max_rx = dev->num_rx_queues;
}

static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch);

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_channels		= veth_get_channels,
	.set_channels		= veth_set_channels,
};

/* general routines */

static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}
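
/* The smp_mb() above pairs with the smp_store_mb() in veth_poll(): the
 * producer orders "ring write, then rx_notify_masked read" while the
 * consumer orders "rx_notify_masked write, then ring re-check", so at
 * least one side always sees the other's update and a packet cannot be
 * left sitting in the ring with NAPI idle.
 */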

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}
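
/* veth_forward_skb() relies on the GNU "a ?: b" extension; an
 * equivalent expansion, for readers unfamiliar with it:
 *
 *	int ret = __dev_forward_skb(dev, skb);	// 0 when skb is accepted
 *
 *	if (ret)
 *		return ret;	// NET_RX_DROP
 *	return xdp ? veth_xdp_rx(rq, skb) : netif_rx(skb);
 */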

/* Return true if the specified skb has chances of GRO aggregation.
 * Don't strive for accuracy, but try to avoid GRO overhead in the most
 * common scenarios.
 * When XDP is enabled, all traffic is considered eligible, as the xmit
 * device has TSO off.
 * When TSO is enabled on the xmit device, we are likely interested only
 * in UDP aggregation, so explicitly check for that if the skb is
 * suspected to belong to locally generated UDP traffic - the sock_wfree
 * destructor is used by UDP, ICMP and XDP sockets.
 */
static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
					 const struct net_device *rcv,
					 const struct sk_buff *skb)
{
	return !(dev->features & NETIF_F_ALL_TSO) ||
		(skb->destructor == sock_wfree &&
		 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
}
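
/* Two illustrative cases for the check above, assuming a veth pair with
 * default features: locally generated TCP leaves TSO enabled on the
 * xmit device, so the helper returns false and the skb bypasses the
 * NAPI/GRO path; a local UDP socket (destructor == sock_wfree) sending
 * to a peer with NETIF_F_GRO_UDP_FWD set returns true and its packets
 * may be aggregated.
 */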

static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool use_napi = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];

		/* The napi pointer is available when an XDP program is
		 * attached or when GRO is enabled.
		 * Don't bother with napi/GRO if the skb can't be aggregated.
		 */
		use_napi = rcu_access_pointer(rq->napi) &&
			   veth_skb_is_eligible_for_gro(dev, rcv, skb);
		skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
		if (!use_napi)
			dev_lstats_add(dev, length);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (use_napi)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
	struct veth_priv *priv = netdev_priv(dev);

	dev_lstats_read(dev, packets, bytes);
	return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}

static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;
	u64 packets, bytes;

	tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
	tot->tx_bytes = bytes;
	tot->tx_packets = packets;

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		veth_stats_tx(peer, &packets, &bytes);
		tot->rx_bytes += bytes;
		tot->rx_packets += packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}
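
/* Note the mirroring in veth_get_stats64() above: the peer's TX lstats
 * are folded into this device's RX counters, and the peer's RX-side XDP
 * counters into this device's TX counters, because every frame the peer
 * queued is a frame this device received and vice versa.  Roughly:
 *
 *	tot->rx_* += peer lstats		// peer xmit == local rx
 *	tot->tx_* += peer xdp_{bytes,packets}	// peer NAPI rx == local tx
 */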

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}
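
/* veth_select_rxq() trades fairness for cheapness: the peer rx queue is
 * derived from the current CPU alone, e.g. with four real rx queues,
 * CPUs 0..7 map to queues 0,1,2,3,0,1,2,3.  Two CPUs may thus share a
 * queue; serialization comes from the ptr_ring producer lock taken in
 * veth_xdp_xmit() below.
 */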

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}

static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, nxmit = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* The napi pointer is set if NAPI is enabled, which ensures that
	 * xdp_ring is initialized on receive side and the peer device is up.
	 */
	if (!rcu_access_pointer(rq->napi))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
			break;
		nxmit++;
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = nxmit;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}
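
/* veth_xdp_xmit() follows the ndo_xdp_xmit() contract: a non-negative
 * return is the number of frames actually queued (nxmit), and ownership
 * of the rest stays with the caller, which must release them.  A caller
 * handing in 16 frames and getting 12 back is expected to
 * xdp_return_frame() the trailing four - veth_xdp_flush_bq() below does
 * exactly that.
 */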

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}

static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0, drops;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
	}

	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame(bq->q[i]);

	drops = bq->count - sent;
	trace_xdp_bulk_tx(rq->dev, sent, drops, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += drops;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}
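
/* XDP_TX frames are thus never forwarded one at a time: veth_xdp_tx()
 * only stages the converted frame in the per-NAPI bulk queue, and the
 * ring/peer work happens in batches of up to VETH_XDP_TX_BULK_SIZE (16)
 * frames - either when the queue fills, above, or when veth_poll()
 * calls veth_xdp_flush() at the end of a poll cycle.  This amortizes
 * the producer-lock and peer-wakeup cost over the whole batch.
 */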

static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
					  struct xdp_frame *frame,
					  struct veth_xdp_tx_bq *bq,
					  struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, &xdp);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(&xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return frame;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

/* frames array contains VETH_XDP_BATCH at most */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
				  int n_xdpf, struct veth_xdp_tx_bq *bq,
				  struct veth_stats *stats)
{
	void *skbs[VETH_XDP_BATCH];
	int i;

	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
			       GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n_xdpf; i++)
			xdp_return_frame(frames[i]);
		stats->rx_drops += n_xdpf;

		return;
	}

	for (i = 0; i < n_xdpf; i++) {
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(frames[i], skb,
						 rq->dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			stats->rx_drops++;
			continue;
		}
		napi_gro_receive(&rq->xdp_napi, skb);
	}
}
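
/* Delivering XDP_PASS frames up the stack needs one sk_buff per frame;
 * xdp_alloc_skb_bulk() allocates all (at most VETH_XDP_BATCH) skb heads
 * from the slab in a single bulk call rather than one allocation per
 * frame, and __xdp_build_skb_from_frame() then attaches each
 * preallocated head to its frame's data page.  The bulk allocation is
 * all-or-nothing: on failure the whole batch is returned and accounted
 * as rx drops.
 */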

static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	u32 pktlen, headroom, act, metalen, frame_sz;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_prepare_for_gro(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
				      skb->len, PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	/* SKB "head" area always has tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);

	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0, n_xdpf = 0;
	void *xdpf[VETH_XDP_BATCH];

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			/* ndo_xdp_xmit */
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += frame->len;
			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
			if (frame) {
				/* XDP_PASS */
				xdpf[n_xdpf++] = frame;
				if (n_xdpf == VETH_XDP_BATCH) {
					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
							      bq, stats);
					n_xdpf = 0;
				}
			}
		} else {
			/* ndo_start_xmit */
			struct sk_buff *skb = ptr;

			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
			if (skb)
				napi_gro_receive(&rq->xdp_napi, skb);
		}
		done++;
	}

	if (n_xdpf)
		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	if (stats.xdp_redirect > 0)
		xdp_do_flush();
	xdp_clear_return_frame_no_direct();

	return done;
}

static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;

err_xdp_ring:
	for (i--; i >= start; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static int __veth_napi_enable(struct net_device *dev)
{
	return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}

static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rcu_assign_pointer(priv->rq[i].napi, NULL);
		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

static void veth_napi_del(struct net_device *dev)
{
	veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
}

static bool veth_gro_requested(const struct net_device *dev)
{
	return !!(dev->wanted_features & NETIF_F_GRO);
}

static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
				 bool napi_already_on)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		if (!napi_already_on)
			netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
		err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
		if (err < 0)
			goto err_rxq_reg;

		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED,
						 NULL);
		if (err < 0)
			goto err_reg_mem;

		/* Save original mem info as it can be overwritten */
		rq->xdp_mem = rq->xdp_rxq.mem;
	}
	return 0;

err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= start; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		if (!napi_already_on)
			netif_napi_del(&rq->xdp_napi);
	}

	return err;
}

static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
				   bool delete_napi)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);

		if (delete_napi)
			netif_napi_del(&rq->xdp_napi);
	}
}

static int veth_enable_xdp(struct net_device *dev)
{
	bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
		if (err)
			return err;

		if (!napi_already_on) {
			err = __veth_napi_enable(dev);
			if (err) {
				veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
				return err;
			}

			if (!veth_gro_requested(dev)) {
				/* user-space did not require GRO, but adding XDP
				 * is supposed to get GRO working
				 */
				dev->features |= NETIF_F_GRO;
				netdev_features_change(dev);
			}
		}
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
		rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
	}

	return 0;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);

	if (!netif_running(dev) || !veth_gro_requested(dev)) {
		veth_napi_del(dev);

		/* if user-space did not require GRO, clear it now,
		 * since adding XDP enabled it
		 */
		if (!veth_gro_requested(dev) && netif_running(dev)) {
			dev->features &= ~NETIF_F_GRO;
			netdev_features_change(dev);
		}
	}

	veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}

static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = start; i < end; i++) {
		struct veth_rq *rq = &priv->rq[i];

		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
	}

	err = __veth_napi_enable_range(dev, start, end);
	if (err) {
		for (i = start; i < end; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_del(&rq->xdp_napi);
		}
		return err;
	}
	return err;
}

static int veth_napi_enable(struct net_device *dev)
{
	return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}

static void veth_disable_range_safe(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);

	if (start >= end)
		return;

	if (priv->_xdp_prog) {
		veth_napi_del_range(dev, start, end);
		veth_disable_xdp_range(dev, start, end, false);
	} else if (veth_gro_requested(dev)) {
		veth_napi_del_range(dev, start, end);
	}
}

static int veth_enable_range_safe(struct net_device *dev, int start, int end)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err;

	if (start >= end)
		return 0;

	if (priv->_xdp_prog) {
		/* these channels are freshly initialized, napi is not on there
		 * even when GRO is requested
		 */
		err = veth_enable_xdp_range(dev, start, end, false);
		if (err)
			return err;

		err = __veth_napi_enable_range(dev, start, end);
		if (err) {
			/* on error always delete the newly added napis */
			veth_disable_xdp_range(dev, start, end, true);
			return err;
		}
	} else if (veth_gro_requested(dev)) {
		return veth_napi_enable_range(dev, start, end);
	}
	return 0;
}

static int veth_set_channels(struct net_device *dev,
			     struct ethtool_channels *ch)
{
	struct veth_priv *priv = netdev_priv(dev);
	unsigned int old_rx_count, new_rx_count;
	struct veth_priv *peer_priv;
	struct net_device *peer;
	int err;

	/* sanity check. Upper bounds are already enforced by the caller */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	/* avoid breaking XDP, if that is enabled */
	peer = rtnl_dereference(priv->peer);
	peer_priv = peer ? netdev_priv(peer) : NULL;
	if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
		return -EINVAL;

	if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
		return -EINVAL;

	old_rx_count = dev->real_num_rx_queues;
	new_rx_count = ch->rx_count;
	if (netif_running(dev)) {
		/* turn device off */
		netif_carrier_off(dev);
		if (peer)
			netif_carrier_off(peer);

		/* try to allocate new resources, as needed */
		err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
		if (err)
			goto out;
	}

	err = netif_set_real_num_rx_queues(dev, ch->rx_count);
	if (err)
		goto revert;

	err = netif_set_real_num_tx_queues(dev, ch->tx_count);
	if (err) {
		int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);

		/* this error condition could happen only if rx and tx change
		 * in opposite directions (e.g. tx nr rises, rx nr decreases)
		 * and we can't do anything to fully restore the original
		 * status
		 */
		if (err2)
			pr_warn("Can't restore rx queues config %d -> %d %d",
				new_rx_count, old_rx_count, err2);
		else
			goto revert;
	}

out:
	if (netif_running(dev)) {
		/* note that we need to swap the arguments WRT the enable part
		 * to identify the range we have to disable
		 */
		veth_disable_range_safe(dev, new_rx_count, old_rx_count);
		netif_carrier_on(dev);
		if (peer)
			netif_carrier_on(peer);
	}
	return err;

revert:
	new_rx_count = old_rx_count;
	old_rx_count = ch->rx_count;
	goto out;
}
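
/* The channel hooks are exercised from user space via the standard
 * ethtool channels API; for example, resizing a device named veth0
 * (name assumed for illustration) to four queue pairs:
 *
 *	ethtool -L veth0 rx 4 tx 4
 *
 * veth_set_channels() allocates resources for a grown rx range before
 * touching the real queue counts and releases the now-unused range only
 * afterwards, so a failure part-way through can be rolled back.
 */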
1259
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001260static int veth_open(struct net_device *dev)
1261{
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001262 struct veth_priv *priv = netdev_priv(dev);
1263 struct net_device *peer = rtnl_dereference(priv->peer);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001264 int err;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001265
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001266 if (!peer)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001267 return -ENOTCONN;
1268
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001269 if (priv->_xdp_prog) {
1270 err = veth_enable_xdp(dev);
1271 if (err)
1272 return err;
Paolo Abenid3256ef2021-04-09 13:04:38 +02001273 } else if (veth_gro_requested(dev)) {
1274 err = veth_napi_enable(dev);
1275 if (err)
1276 return err;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001277 }
1278
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001279 if (peer->flags & IFF_UP) {
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001280 netif_carrier_on(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001281 netif_carrier_on(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001282 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001283
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001284 return 0;
1285}
1286
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001287static int veth_close(struct net_device *dev)
1288{
1289 struct veth_priv *priv = netdev_priv(dev);
Eric Dumazet2efd32e2013-01-10 08:32:45 +00001290 struct net_device *peer = rtnl_dereference(priv->peer);
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001291
1292 netif_carrier_off(dev);
Eric Dumazet2efd32e2013-01-10 08:32:45 +00001293 if (peer)
1294 netif_carrier_off(peer);
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001295
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001296 if (priv->_xdp_prog)
1297 veth_disable_xdp(dev);
Paolo Abenid3256ef2021-04-09 13:04:38 +02001298 else if (veth_gro_requested(dev))
1299 veth_napi_del(dev);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001300
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001301 return 0;
1302}
1303
Jarod Wilson91572082016-10-20 13:55:20 -04001304static int is_valid_veth_mtu(int mtu)
Eric Biederman38d40812009-03-03 23:36:04 -08001305{
Jarod Wilson91572082016-10-20 13:55:20 -04001306 return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
Eric Biederman38d40812009-03-03 23:36:04 -08001307}
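/* For reference (assuming the usual uapi values): ETH_MIN_MTU is 68 and
 * ETH_MAX_MTU is 65535, so veth accepts the full Ethernet MTU range and
 * relies on veth_xdp_set() below to clamp peer->max_mtu while an XDP
 * program is attached.
 */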
1308
Toshiaki Makita7797b932018-08-15 17:07:29 +09001309static int veth_alloc_queues(struct net_device *dev)
1310{
1311 struct veth_priv *priv = netdev_priv(dev);
1312 int i;
1313
1314 priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
1315 if (!priv->rq)
1316 return -ENOMEM;
1317
Toshiaki Makita4195e542018-10-11 18:36:49 +09001318 for (i = 0; i < dev->num_rx_queues; i++) {
Toshiaki Makita7797b932018-08-15 17:07:29 +09001319 priv->rq[i].dev = dev;
Toshiaki Makita4195e542018-10-11 18:36:49 +09001320 u64_stats_init(&priv->rq[i].stats.syncp);
1321 }
Toshiaki Makita7797b932018-08-15 17:07:29 +09001322
1323 return 0;
1324}
1325
1326static void veth_free_queues(struct net_device *dev)
1327{
1328 struct veth_priv *priv = netdev_priv(dev);
1329
1330 kfree(priv->rq);
1331}
1332
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001333static int veth_dev_init(struct net_device *dev)
1334{
Toshiaki Makita7797b932018-08-15 17:07:29 +09001335 int err;
1336
Li RongQing14d73412018-09-17 18:46:55 +08001337 dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
1338 if (!dev->lstats)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001339 return -ENOMEM;
Toshiaki Makita7797b932018-08-15 17:07:29 +09001340
1341 err = veth_alloc_queues(dev);
1342 if (err) {
Li RongQing14d73412018-09-17 18:46:55 +08001343 free_percpu(dev->lstats);
Toshiaki Makita7797b932018-08-15 17:07:29 +09001344 return err;
1345 }
1346
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001347 return 0;
1348}
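/* dev->lstats holds the per-cpu tx packet/byte counters, while the rx
 * side lives in the per-queue veth_rq stats allocated by
 * veth_alloc_queues(); both are folded together by veth_get_stats64().
 */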
1349
David S. Miller11687a12009-06-25 02:45:42 -07001350static void veth_dev_free(struct net_device *dev)
1351{
Toshiaki Makita7797b932018-08-15 17:07:29 +09001352 veth_free_queues(dev);
Li RongQing14d73412018-09-17 18:46:55 +08001353 free_percpu(dev->lstats);
David S. Miller11687a12009-06-25 02:45:42 -07001354}
1355
WANG Congbb446c12014-06-23 15:36:02 -07001356#ifdef CONFIG_NET_POLL_CONTROLLER
1357static void veth_poll_controller(struct net_device *dev)
1358{
1359	/* veth only receives frames when its peer sends one.
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001360	 * Since receiving has nothing to do with disabling irqs, we are
WANG Congbb446c12014-06-23 15:36:02 -07001361	 * guaranteed never to have pending data when we poll for it, so
1362	 * there is nothing to do here.
1363	 *
1364	 * We need this though, so netpoll recognizes us as an interface
1365	 * that supports polling, which enables bridge devices in virt
1366	 * setups to still use netconsole.
1367	 */
1368}
1369#endif /* CONFIG_NET_POLL_CONTROLLER */
1370
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001371static int veth_get_iflink(const struct net_device *dev)
1372{
1373 struct veth_priv *priv = netdev_priv(dev);
1374 struct net_device *peer;
1375 int iflink;
1376
1377 rcu_read_lock();
1378 peer = rcu_dereference(priv->peer);
1379 iflink = peer ? peer->ifindex : 0;
1380 rcu_read_unlock();
1381
1382 return iflink;
1383}
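/* This is what lets "ip link show" render a veth pair as, e.g.,
 * "veth0@veth1" (hypothetical names): the link partner is exposed through
 * the standard iflink attribute, or 0 once the peer is gone.
 */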
1384
Toshiaki Makitadc224822018-08-03 16:58:11 +09001385static netdev_features_t veth_fix_features(struct net_device *dev,
1386 netdev_features_t features)
1387{
1388 struct veth_priv *priv = netdev_priv(dev);
1389 struct net_device *peer;
1390
1391 peer = rtnl_dereference(priv->peer);
1392 if (peer) {
1393 struct veth_priv *peer_priv = netdev_priv(peer);
1394
1395 if (peer_priv->_xdp_prog)
1396 features &= ~NETIF_F_GSO_SOFTWARE;
1397 }
Paolo Abenid3256ef2021-04-09 13:04:38 +02001398 if (priv->_xdp_prog)
1399 features |= NETIF_F_GRO;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001400
1401 return features;
1402}
1403
Paolo Abenid3256ef2021-04-09 13:04:38 +02001404static int veth_set_features(struct net_device *dev,
1405 netdev_features_t features)
1406{
1407 netdev_features_t changed = features ^ dev->features;
1408 struct veth_priv *priv = netdev_priv(dev);
1409 int err;
1410
1411 if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
1412 return 0;
1413
1414 if (features & NETIF_F_GRO) {
1415 err = veth_napi_enable(dev);
1416 if (err)
1417 return err;
1418 } else {
1419 veth_napi_del(dev);
1420 }
1421 return 0;
1422}
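/* A userspace toggle such as "ethtool -K veth0 gro on" (hypothetical
 * name) lands here via netdev_update_features(); NAPI is switched only
 * when the device is up and no XDP program is attached, since with XDP
 * the rings are already driven by NAPI (see veth_enable_xdp()).
 */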
1423
Paolo Abeni163e5292016-02-26 10:45:41 +01001424static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
1425{
1426 struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1427 struct net_device *peer;
1428
1429 if (new_hr < 0)
1430 new_hr = 0;
1431
1432 rcu_read_lock();
1433 peer = rcu_dereference(priv->peer);
1434 if (unlikely(!peer))
1435 goto out;
1436
1437 peer_priv = netdev_priv(peer);
1438 priv->requested_headroom = new_hr;
1439 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1440 dev->needed_headroom = new_hr;
1441 peer->needed_headroom = new_hr;
1442
1443out:
1444 rcu_read_unlock();
1445}
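/* Both ends of the pair advertise the maximum of the two requested
 * headrooms, so an upper device asking for extra headroom on one side
 * also reserves it for frames injected from the other side.
 */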
1446
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001447static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1448 struct netlink_ext_ack *extack)
1449{
1450 struct veth_priv *priv = netdev_priv(dev);
1451 struct bpf_prog *old_prog;
1452 struct net_device *peer;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001453 unsigned int max_mtu;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001454 int err;
1455
1456 old_prog = priv->_xdp_prog;
1457 priv->_xdp_prog = prog;
1458 peer = rtnl_dereference(priv->peer);
1459
1460 if (prog) {
1461 if (!peer) {
1462 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1463 err = -ENOTCONN;
1464 goto err;
1465 }
1466
Toshiaki Makitadc224822018-08-03 16:58:11 +09001467 max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
1468 peer->hard_header_len -
1469 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1470 if (peer->mtu > max_mtu) {
1471 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1472 err = -ERANGE;
1473 goto err;
1474 }
1475
Toshiaki Makita638264d2018-08-03 16:58:18 +09001476 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1477 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1478 err = -ENOSPC;
1479 goto err;
1480 }
1481
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001482 if (dev->flags & IFF_UP) {
1483 err = veth_enable_xdp(dev);
1484 if (err) {
1485 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1486 goto err;
1487 }
1488 }
Toshiaki Makitadc224822018-08-03 16:58:11 +09001489
1490 if (!old_prog) {
1491 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1492 peer->max_mtu = max_mtu;
1493 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001494 }
1495
1496 if (old_prog) {
Toshiaki Makitadc224822018-08-03 16:58:11 +09001497 if (!prog) {
1498 if (dev->flags & IFF_UP)
1499 veth_disable_xdp(dev);
1500
1501 if (peer) {
1502 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1503 peer->max_mtu = ETH_MAX_MTU;
1504 }
1505 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001506 bpf_prog_put(old_prog);
1507 }
1508
Toshiaki Makitadc224822018-08-03 16:58:11 +09001509 if ((!!old_prog ^ !!prog) && peer)
1510 netdev_update_features(peer);
1511
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001512 return 0;
1513err:
1514 priv->_xdp_prog = old_prog;
1515
1516 return err;
1517}
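/* Example: attaching a program from userspace with iproute2 (hypothetical
 * object/section names):
 *
 *   ip link set dev veth0 xdp obj xdp_prog.o sec xdp
 *
 * The max_mtu computed above guarantees that XDP headroom + frame +
 * skb_shared_info still fit into a single page, which is what the XDP
 * buffer model expects for linear frames.
 */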
1518
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001519static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1520{
1521 switch (xdp->command) {
1522 case XDP_SETUP_PROG:
1523 return veth_xdp_set(dev, xdp->prog, xdp->extack);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001524 default:
1525 return -EINVAL;
1526 }
1527}
1528
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001529static const struct net_device_ops veth_netdev_ops = {
Daniel Lezcanoee923622009-02-22 00:04:45 -08001530 .ndo_init = veth_dev_init,
1531 .ndo_open = veth_open,
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001532 .ndo_stop = veth_close,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001533 .ndo_start_xmit = veth_xmit,
stephen hemminger6311cc42011-06-08 14:53:59 +00001534 .ndo_get_stats64 = veth_get_stats64,
Gao feng5c70ef82013-10-04 16:52:24 +08001535 .ndo_set_rx_mode = veth_set_multicast_list,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001536 .ndo_set_mac_address = eth_mac_addr,
WANG Congbb446c12014-06-23 15:36:02 -07001537#ifdef CONFIG_NET_POLL_CONTROLLER
1538 .ndo_poll_controller = veth_poll_controller,
1539#endif
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001540 .ndo_get_iflink = veth_get_iflink,
Toshiaki Makitadc224822018-08-03 16:58:11 +09001541 .ndo_fix_features = veth_fix_features,
Paolo Abenid3256ef2021-04-09 13:04:38 +02001542 .ndo_set_features = veth_set_features,
Toshiaki Makita1a04a822015-07-31 15:03:25 +09001543 .ndo_features_check = passthru_features_check,
Paolo Abeni163e5292016-02-26 10:45:41 +01001544 .ndo_set_rx_headroom = veth_set_rx_headroom,
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001545 .ndo_bpf = veth_xdp,
Lorenzo Bianconi9152cff2020-03-19 17:41:28 +01001546 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
Daniel Borkmann9aa12062020-10-11 01:40:02 +02001547 .ndo_get_peer_dev = veth_peer_dev,
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001548};
1549
Alexander Duyck732912d72016-04-19 14:02:26 -04001550#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
Xin Longc80fafb2016-08-25 13:21:49 +08001551 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
Alexander Duyck732912d72016-04-19 14:02:26 -04001552 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
Patrick McHardy28d2b132013-04-19 02:04:32 +00001553 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1554 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
Eric Dumazet80933152012-12-29 16:26:10 +00001555
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001556static void veth_setup(struct net_device *dev)
1557{
1558 ether_setup(dev);
1559
Neil Horman550fd082011-07-26 06:05:38 +00001560 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
Hannes Frederic Sowa23ea5a92012-10-30 16:22:01 +00001561 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
Phil Sutter02f01ec2015-08-18 10:30:29 +02001562 dev->priv_flags |= IFF_NO_QUEUE;
Paolo Abeni163e5292016-02-26 10:45:41 +01001563 dev->priv_flags |= IFF_PHONY_HEADROOM;
Neil Horman550fd082011-07-26 06:05:38 +00001564
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001565 dev->netdev_ops = &veth_netdev_ops;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001566 dev->ethtool_ops = &veth_ethtool_ops;
1567 dev->features |= NETIF_F_LLTX;
Eric Dumazet80933152012-12-29 16:26:10 +00001568 dev->features |= VETH_FEATURES;
Toshiaki Makita8d0d21f2014-02-18 21:20:08 +09001569 dev->vlan_features = dev->features &
Vlad Yasevich3f8c7072014-03-27 22:14:48 -04001570 ~(NETIF_F_HW_VLAN_CTAG_TX |
1571 NETIF_F_HW_VLAN_STAG_TX |
1572 NETIF_F_HW_VLAN_CTAG_RX |
1573 NETIF_F_HW_VLAN_STAG_RX);
David S. Millercf124db2017-05-08 12:52:56 -04001574 dev->needs_free_netdev = true;
1575 dev->priv_destructor = veth_dev_free;
Jarod Wilson91572082016-10-20 13:55:20 -04001576 dev->max_mtu = ETH_MAX_MTU;
Michał Mirosława2c725f2011-03-31 01:01:35 +00001577
Eric Dumazet80933152012-12-29 16:26:10 +00001578 dev->hw_features = VETH_FEATURES;
Eric Dumazet82d81892013-10-25 18:25:03 -07001579 dev->hw_enc_features = VETH_FEATURES;
David Ahern607fca92016-08-24 20:10:45 -07001580 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001581}
1582
1583/*
1584 * netlink interface
1585 */
1586
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02001587static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1588 struct netlink_ext_ack *extack)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001589{
1590 if (tb[IFLA_ADDRESS]) {
1591 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1592 return -EINVAL;
1593 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1594 return -EADDRNOTAVAIL;
1595 }
Eric Biederman38d40812009-03-03 23:36:04 -08001596 if (tb[IFLA_MTU]) {
1597 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1598 return -EINVAL;
1599 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001600 return 0;
1601}
1602
1603static struct rtnl_link_ops veth_link_ops;
1604
Paolo Abenid3256ef2021-04-09 13:04:38 +02001605static void veth_disable_gro(struct net_device *dev)
1606{
1607 dev->features &= ~NETIF_F_GRO;
1608 dev->wanted_features &= ~NETIF_F_GRO;
1609 netdev_update_features(dev);
1610}
1611
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001612static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
1613{
1614 int err;
1615
1616 if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
1617 err = netif_set_real_num_tx_queues(dev, 1);
1618 if (err)
1619 return err;
1620 }
1621 if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
1622 err = netif_set_real_num_rx_queues(dev, 1);
1623 if (err)
1624 return err;
1625 }
1626 return 0;
1627}
1628
Eric W. Biederman81adee42009-11-08 00:53:51 -08001629static int veth_newlink(struct net *src_net, struct net_device *dev,
Matthias Schiffer7a3f4a12017-06-25 23:55:59 +02001630 struct nlattr *tb[], struct nlattr *data[],
1631 struct netlink_ext_ack *extack)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001632{
Toshiaki Makita7797b932018-08-15 17:07:29 +09001633 int err;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001634 struct net_device *peer;
1635 struct veth_priv *priv;
1636 char ifname[IFNAMSIZ];
1637 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
Tom Gundersen55177502014-07-14 16:37:25 +02001638 unsigned char name_assign_type;
Patrick McHardy3729d502010-02-26 06:34:54 +00001639 struct ifinfomsg *ifmp;
Eric W. Biederman81adee42009-11-08 00:53:51 -08001640 struct net *net;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001641
1642 /*
1643 * create and register peer first
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001644 */
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001645 if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1646 struct nlattr *nla_peer;
1647
1648 nla_peer = data[VETH_INFO_PEER];
Patrick McHardy3729d502010-02-26 06:34:54 +00001649 ifmp = nla_data(nla_peer);
Jiri Pirkof7b12602014-02-18 20:53:18 +01001650 err = rtnl_nla_parse_ifla(peer_tb,
1651 nla_data(nla_peer) + sizeof(struct ifinfomsg),
Johannes Bergfceb6432017-04-12 14:34:07 +02001652 nla_len(nla_peer) - sizeof(struct ifinfomsg),
1653 NULL);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001654 if (err < 0)
1655 return err;
1656
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02001657 err = veth_validate(peer_tb, NULL, extack);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001658 if (err < 0)
1659 return err;
1660
1661 tbp = peer_tb;
Patrick McHardy3729d502010-02-26 06:34:54 +00001662 } else {
1663 ifmp = NULL;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001664 tbp = tb;
Patrick McHardy3729d502010-02-26 06:34:54 +00001665 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001666
Serhey Popovych191cdb32017-06-21 12:12:24 +03001667 if (ifmp && tbp[IFLA_IFNAME]) {
Francis Laniel872f6902020-11-15 18:08:06 +01001668 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
Tom Gundersen55177502014-07-14 16:37:25 +02001669 name_assign_type = NET_NAME_USER;
1670 } else {
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001671 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
Tom Gundersen55177502014-07-14 16:37:25 +02001672 name_assign_type = NET_NAME_ENUM;
1673 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001674
Eric W. Biederman81adee42009-11-08 00:53:51 -08001675 net = rtnl_link_get_net(src_net, tbp);
1676 if (IS_ERR(net))
1677 return PTR_ERR(net);
1678
Tom Gundersen55177502014-07-14 16:37:25 +02001679 peer = rtnl_create_link(net, ifname, name_assign_type,
David Ahernd0522f12018-11-06 12:51:14 -08001680 &veth_link_ops, tbp, extack);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001681 if (IS_ERR(peer)) {
1682 put_net(net);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001683 return PTR_ERR(peer);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001684 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001685
Serhey Popovych191cdb32017-06-21 12:12:24 +03001686 if (!ifmp || !tbp[IFLA_ADDRESS])
Danny Kukawkaf2cedb62012-02-15 06:45:39 +00001687 eth_hw_addr_random(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001688
Pavel Emelyanove6f8f1a2012-08-08 21:53:03 +00001689 if (ifmp && (dev->ifindex != 0))
1690 peer->ifindex = ifmp->ifi_index;
1691
Eric Dumazet4b66d212021-11-19 07:43:31 -08001692 netif_set_gso_max_size(peer, dev->gso_max_size);
Stephen Hemminger72d249552017-12-07 15:40:20 -08001693 peer->gso_max_segs = dev->gso_max_segs;
1694
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001695 err = register_netdevice(peer);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001696 put_net(net);
1697 net = NULL;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001698 if (err < 0)
1699 goto err_register_peer;
1700
Paolo Abenid3256ef2021-04-09 13:04:38 +02001701 /* keep GRO disabled by default to be consistent with the established
1702 * veth behavior
1703 */
1704 veth_disable_gro(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001705 netif_carrier_off(peer);
1706
Patrick McHardy3729d502010-02-26 06:34:54 +00001707 err = rtnl_configure_link(peer, ifmp);
1708 if (err < 0)
1709 goto err_configure_peer;
1710
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001711 /*
1712 * register dev last
1713 *
1714	 * note that, since we've registered a new device, the dev's name
1715	 * should be re-allocated
1716 */
1717
1718 if (tb[IFLA_ADDRESS] == NULL)
Danny Kukawkaf2cedb62012-02-15 06:45:39 +00001719 eth_hw_addr_random(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001720
Jiri Pirko6c8c4442011-04-30 01:28:17 +00001721 if (tb[IFLA_IFNAME])
Francis Laniel872f6902020-11-15 18:08:06 +01001722 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
Jiri Pirko6c8c4442011-04-30 01:28:17 +00001723 else
1724 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1725
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001726 err = register_netdevice(dev);
1727 if (err < 0)
1728 goto err_register_dev;
1729
1730 netif_carrier_off(dev);
1731
1732 /*
1733	 * tie the devices together
1734 */
1735
1736 priv = netdev_priv(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001737 rcu_assign_pointer(priv->peer, peer);
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001738 err = veth_init_queues(dev, tb);
1739 if (err)
1740 goto err_queues;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001741
1742 priv = netdev_priv(peer);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001743 rcu_assign_pointer(priv->peer, dev);
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001744 err = veth_init_queues(peer, tb);
1745 if (err)
1746 goto err_queues;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001747
Paolo Abenid3256ef2021-04-09 13:04:38 +02001748 veth_disable_gro(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001749 return 0;
1750
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001751err_queues:
1752 unregister_netdevice(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001753err_register_dev:
1754 /* nothing to do */
Patrick McHardy3729d502010-02-26 06:34:54 +00001755err_configure_peer:
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001756 unregister_netdevice(peer);
1757 return err;
1758
1759err_register_peer:
1760 free_netdev(peer);
1761 return err;
1762}
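/* Userspace equivalent (hypothetical names):
 *
 *   ip link add veth0 numtxqueues 4 numrxqueues 4 \
 *           type veth peer name veth1
 *
 * When the queue count attributes are omitted, veth_init_queues() trims
 * both freshly created devices back to a single real queue each.
 */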
1763
Eric Dumazet23289a32009-10-27 07:06:36 +00001764static void veth_dellink(struct net_device *dev, struct list_head *head)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001765{
1766 struct veth_priv *priv;
1767 struct net_device *peer;
1768
1769 priv = netdev_priv(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001770 peer = rtnl_dereference(priv->peer);
1771
1772 /* Note : dellink() is called from default_device_exit_batch(),
1773 * before a rcu_synchronize() point. The devices are guaranteed
1774 * not being freed before one RCU grace period.
1775 */
1776 RCU_INIT_POINTER(priv->peer, NULL);
Eric Dumazet24540532009-10-30 01:00:27 -07001777 unregister_netdevice_queue(dev, head);
Eric Dumazetf45a5c22013-02-08 20:10:49 +00001778
1779 if (peer) {
1780 priv = netdev_priv(peer);
1781 RCU_INIT_POINTER(priv->peer, NULL);
1782 unregister_netdevice_queue(peer, head);
1783 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001784}
1785
Thomas Graf23711432012-02-15 04:09:46 +00001786static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1787 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1788};
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001789
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +01001790static struct net *veth_get_link_net(const struct net_device *dev)
1791{
1792 struct veth_priv *priv = netdev_priv(dev);
1793 struct net_device *peer = rtnl_dereference(priv->peer);
1794
1795 return peer ? dev_net(peer) : dev_net(dev);
1796}
1797
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001798static unsigned int veth_get_num_queues(void)
1799{
1800 /* enforce the same queue limit as rtnl_create_link */
1801 int queues = num_possible_cpus();
1802
1803 if (queues > 4096)
1804 queues = 4096;
1805 return queues;
1806}
1807
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001808static struct rtnl_link_ops veth_link_ops = {
1809 .kind = DRV_NAME,
1810 .priv_size = sizeof(struct veth_priv),
1811 .setup = veth_setup,
1812 .validate = veth_validate,
1813 .newlink = veth_newlink,
1814 .dellink = veth_dellink,
1815 .policy = veth_policy,
1816 .maxtype = VETH_INFO_MAX,
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +01001817 .get_link_net = veth_get_link_net,
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001818 .get_num_tx_queues = veth_get_num_queues,
1819 .get_num_rx_queues = veth_get_num_queues,
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001820};
1821
1822/*
1823 * init/fini
1824 */
1825
1826static __init int veth_init(void)
1827{
1828 return rtnl_link_register(&veth_link_ops);
1829}
1830
1831static __exit void veth_exit(void)
1832{
Patrick McHardy68365452008-01-20 17:25:14 -08001833 rtnl_link_unregister(&veth_link_ops);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001834}
1835
1836module_init(veth_init);
1837module_exit(veth_exit);
1838
1839MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1840MODULE_LICENSE("GPL v2");
1841MODULE_ALIAS_RTNL_LINK(DRV_NAME);