// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"
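
/*
 * veth devices are created in pairs, e.g. with iproute2:
 *
 *     ip link add veth0 type veth peer name veth1
 *
 * Frames transmitted on one device of the pair are received on its peer.
 */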

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16
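/*
 * The bulk sizes above bound per-NAPI-poll batching: at most
 * VETH_XDP_TX_BULK_SIZE XDP_TX frames are buffered before being flushed
 * to the peer ring, and skbs for XDP_PASS frames are bulk-allocated in
 * groups of at most VETH_XDP_BATCH.
 */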

struct veth_stats {
        u64 rx_drops;
        /* xdp */
        u64 xdp_packets;
        u64 xdp_bytes;
        u64 xdp_redirect;
        u64 xdp_drops;
        u64 xdp_tx;
        u64 xdp_tx_err;
        u64 peer_tq_xdp_xmit;
        u64 peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
        struct veth_stats vs;
        struct u64_stats_sync syncp;
};

struct veth_rq {
        struct napi_struct xdp_napi;
        struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
        struct net_device *dev;
        struct bpf_prog __rcu *xdp_prog;
        struct xdp_mem_info xdp_mem;
        struct veth_rq_stats stats;
        bool rx_notify_masked;
        struct ptr_ring xdp_ring;
        struct xdp_rxq_info xdp_rxq;
};

struct veth_priv {
        struct net_device __rcu *peer;
        atomic64_t dropped;
        struct bpf_prog *_xdp_prog;
        struct veth_rq *rq;
        unsigned int requested_headroom;
};

struct veth_xdp_tx_bq {
        struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
        unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
        char desc[ETH_GSTRING_LEN];
        size_t offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
        { "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
        { "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
        { "drops",		VETH_RQ_STAT(rx_drops) },
        { "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
        { "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
        { "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
        { "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
        { "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
        { "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
                                   struct ethtool_link_ksettings *cmd)
{
        cmd->base.speed = SPEED_10000;
        cmd->base.duplex = DUPLEX_FULL;
        cmd->base.port = PORT_TP;
        cmd->base.autoneg = AUTONEG_DISABLE;
        return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        u8 *p = buf;
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                p += sizeof(ethtool_stats_keys);
                for (i = 0; i < dev->real_num_rx_queues; i++)
                        for (j = 0; j < VETH_RQ_STATS_LEN; j++)
                                ethtool_sprintf(&p, "rx_queue_%u_%.18s",
                                                i, veth_rq_stats_desc[j].desc);

                for (i = 0; i < dev->real_num_tx_queues; i++)
                        for (j = 0; j < VETH_TQ_STATS_LEN; j++)
                                ethtool_sprintf(&p, "tx_queue_%u_%.18s",
                                                i, veth_tq_stats_desc[j].desc);
                break;
        }
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ethtool_stats_keys) +
                       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
                       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
        default:
                return -EOPNOTSUPP;
        }
}

static void veth_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *stats, u64 *data)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct net_device *peer = rtnl_dereference(priv->peer);
        int i, j, idx;

        data[0] = peer ? peer->ifindex : 0;
        idx = 1;
        for (i = 0; i < dev->real_num_rx_queues; i++) {
                const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
                const void *stats_base = (void *)&rq_stats->vs;
                unsigned int start;
                size_t offset;

                do {
                        start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
                        for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
                                offset = veth_rq_stats_desc[j].offset;
                                data[idx + j] = *(u64 *)(stats_base + offset);
                        }
                } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
                idx += VETH_RQ_STATS_LEN;
        }

        if (!peer)
                return;

        rcv_priv = netdev_priv(peer);
        for (i = 0; i < peer->real_num_rx_queues; i++) {
                const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
                const void *base = (void *)&rq_stats->vs;
                unsigned int start, tx_idx = idx;
                size_t offset;

                tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
                do {
                        start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
                        for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
                                offset = veth_tq_stats_desc[j].offset;
                                data[tx_idx + j] += *(u64 *)(base + offset);
                        }
                } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
        }
}

static void veth_get_channels(struct net_device *dev,
                              struct ethtool_channels *channels)
{
        channels->tx_count = dev->real_num_tx_queues;
        channels->rx_count = dev->real_num_rx_queues;
        channels->max_tx = dev->num_tx_queues;
        channels->max_rx = dev->num_rx_queues;
}

static int veth_set_channels(struct net_device *dev,
                             struct ethtool_channels *ch);

static const struct ethtool_ops veth_ethtool_ops = {
        .get_drvinfo		= veth_get_drvinfo,
        .get_link		= ethtool_op_get_link,
        .get_strings		= veth_get_strings,
        .get_sset_count		= veth_get_sset_count,
        .get_ethtool_stats	= veth_get_ethtool_stats,
        .get_link_ksettings	= veth_get_link_ksettings,
        .get_ts_info		= ethtool_op_get_ts_info,
        .get_channels		= veth_get_channels,
        .set_channels		= veth_set_channels,
};

/* general routines */

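/*
 * Both sk_buffs (queued by the peer's ndo_start_xmit) and xdp_frames
 * (queued by the peer's ndo_xdp_xmit) travel through the same per-queue
 * ptr_ring. xdp_frame pointers are tagged by setting their otherwise
 * always-clear low bit, VETH_XDP_FLAG, so the consumer can tell the two
 * apart.
 */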
static bool veth_is_xdp_frame(void *ptr)
{
        return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
        return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
        return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
        if (veth_is_xdp_frame(ptr))
                xdp_return_frame(veth_ptr_to_xdp(ptr));
        else
                kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
        /* Write ptr_ring before reading rx_notify_masked */
        smp_mb();
        if (!READ_ONCE(rq->rx_notify_masked) &&
            napi_schedule_prep(&rq->xdp_napi)) {
                WRITE_ONCE(rq->rx_notify_masked, true);
                __napi_schedule(&rq->xdp_napi);
        }
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
        if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
                            struct veth_rq *rq, bool xdp)
{
        return __dev_forward_skb(dev, skb) ?: xdp ?
                veth_xdp_rx(rq, skb) :
                netif_rx(skb);
}

/* Return true if the specified skb has a chance of GRO aggregation.
 * Don't strive for accuracy, but try to avoid GRO overhead in the most
 * common scenarios.
 * When XDP is enabled, all traffic is considered eligible, as the xmit
 * device has TSO off.
 * When TSO is enabled on the xmit device, we are likely interested only
 * in UDP aggregation, so explicitly check whether the skb is suspected to
 * belong to locally generated UDP traffic (the sock_wfree destructor is
 * used by UDP, ICMP and XDP sockets).
 */
static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
                                         const struct net_device *rcv,
                                         const struct sk_buff *skb)
{
        return !(dev->features & NETIF_F_ALL_TSO) ||
                (skb->destructor == sock_wfree &&
                 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
}

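/*
 * Transmit path: a frame sent on one device of the pair is delivered to
 * its peer, either through netif_rx() (the legacy path) or through the
 * peer rx queue's ptr_ring when that queue runs in NAPI mode (XDP
 * attached, or GRO enabled and the skb eligible for aggregation).
 */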
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        struct veth_rq *rq = NULL;
        struct net_device *rcv;
        int length = skb->len;
        bool use_napi = false;
        int rxq;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv)) {
                kfree_skb(skb);
                goto drop;
        }

        rcv_priv = netdev_priv(rcv);
        rxq = skb_get_queue_mapping(skb);
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];

                /* The napi pointer is available when an XDP program is
                 * attached or when GRO is enabled.
                 * Don't bother with napi/GRO if the skb can't be aggregated.
                 */
                use_napi = rcu_access_pointer(rq->napi) &&
                           veth_skb_is_eligible_for_gro(dev, rcv, skb);
        }

        skb_tx_timestamp(skb);
        if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
                if (!use_napi)
                        dev_lstats_add(dev, length);
        } else {
drop:
                atomic64_inc(&priv->dropped);
        }

        if (use_napi)
                __veth_xdp_flush(rq);

        rcu_read_unlock();

        return NETDEV_TX_OK;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
        struct veth_priv *priv = netdev_priv(dev);

        dev_lstats_read(dev, packets, bytes);
        return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        result->peer_tq_xdp_xmit_err = 0;
        result->xdp_packets = 0;
        result->xdp_tx_err = 0;
        result->xdp_bytes = 0;
        result->rx_drops = 0;
        for (i = 0; i < dev->num_rx_queues; i++) {
                u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
                struct veth_rq_stats *stats = &priv->rq[i].stats;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
                        xdp_tx_err = stats->vs.xdp_tx_err;
                        packets = stats->vs.xdp_packets;
                        bytes = stats->vs.xdp_bytes;
                        drops = stats->vs.rx_drops;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
                result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
                result->xdp_tx_err += xdp_tx_err;
                result->xdp_packets += packets;
                result->xdp_bytes += bytes;
                result->rx_drops += drops;
        }
}

static void veth_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *tot)
{
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
        struct veth_stats rx;
        u64 packets, bytes;

        tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
        tot->tx_bytes = bytes;
        tot->tx_packets = packets;

        veth_stats_rx(&rx, dev);
        tot->tx_dropped += rx.xdp_tx_err;
        tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
        tot->rx_bytes = rx.xdp_bytes;
        tot->rx_packets = rx.xdp_packets;

        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
                veth_stats_tx(peer, &packets, &bytes);
                tot->rx_bytes += bytes;
                tot->rx_packets += packets;

                veth_stats_rx(&rx, peer);
                tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
                tot->rx_dropped += rx.xdp_tx_err;
                tot->tx_bytes += rx.xdp_bytes;
                tot->tx_packets += rx.xdp_packets;
        }
        rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
                                      int buflen)
{
        struct sk_buff *skb;

        skb = build_skb(head, buflen);
        if (!skb)
                return NULL;

        skb_reserve(skb, headroom);
        skb_put(skb, len);

        return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
        return smp_processor_id() % dev->real_num_rx_queues;
}
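
/* veth_select_rxq() above maps the sending CPU onto the peer's rx queues;
 * no flow hash is involved in the queue selection.
 */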

static struct net_device *veth_peer_dev(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);

        /* Callers must be under RCU read side. */
        return rcu_dereference(priv->peer);
}

static int veth_xdp_xmit(struct net_device *dev, int n,
                         struct xdp_frame **frames,
                         u32 flags, bool ndo_xmit)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
        int i, ret = -ENXIO, nxmit = 0;
        struct net_device *rcv;
        unsigned int max_len;
        struct veth_rq *rq;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* The napi pointer is set if NAPI is enabled, which ensures that
         * xdp_ring is initialized on receive side and the peer device is up.
         */
        if (!rcu_access_pointer(rq->napi))
                goto out;

        max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

        spin_lock(&rq->xdp_ring.producer_lock);
        for (i = 0; i < n; i++) {
                struct xdp_frame *frame = frames[i];
                void *ptr = veth_xdp_to_ptr(frame);

                if (unlikely(frame->len > max_len ||
                             __ptr_ring_produce(&rq->xdp_ring, ptr)))
                        break;
                nxmit++;
        }
        spin_unlock(&rq->xdp_ring.producer_lock);

        if (flags & XDP_XMIT_FLUSH)
                __veth_xdp_flush(rq);

        ret = nxmit;
        if (ndo_xmit) {
                u64_stats_update_begin(&rq->stats.syncp);
                rq->stats.vs.peer_tq_xdp_xmit += nxmit;
                rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
                u64_stats_update_end(&rq->stats.syncp);
        }

out:
        rcu_read_unlock();

        return ret;
}

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
                             struct xdp_frame **frames, u32 flags)
{
        int err;

        err = veth_xdp_xmit(dev, n, frames, flags, true);
        if (err < 0) {
                struct veth_priv *priv = netdev_priv(dev);

                atomic64_add(n, &priv->dropped);
        }

        return err;
}

static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
        int sent, i, err = 0, drops;

        sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
        if (sent < 0) {
                err = sent;
                sent = 0;
        }

        for (i = sent; unlikely(i < bq->count); i++)
                xdp_return_frame(bq->q[i]);

        drops = bq->count - sent;
        trace_xdp_bulk_tx(rq->dev, sent, drops, err);

        u64_stats_update_begin(&rq->stats.syncp);
        rq->stats.vs.xdp_tx += sent;
        rq->stats.vs.xdp_tx_err += drops;
        u64_stats_update_end(&rq->stats.syncp);

        bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
        struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
        struct net_device *rcv;
        struct veth_rq *rcv_rq;

        rcu_read_lock();
        veth_xdp_flush_bq(rq, bq);
        rcv = rcu_dereference(priv->peer);
        if (unlikely(!rcv))
                goto out;

        rcv_priv = netdev_priv(rcv);
        rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
        /* Is the xdp_ring initialized on the receive side? */
        if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
                goto out;

        __veth_xdp_flush(rcv_rq);
out:
        rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
                       struct veth_xdp_tx_bq *bq)
{
        struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

        if (unlikely(!frame))
                return -EOVERFLOW;

        if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
                veth_xdp_flush_bq(rq, bq);

        bq->q[bq->count++] = frame;

        return 0;
}

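/*
 * Run the rx queue's XDP program (if any) on one xdp_frame consumed from
 * the ring, i.e. a frame queued by the peer's ndo_xdp_xmit. Returns the
 * frame when it should continue up the stack (XDP_PASS, or no program
 * attached), or NULL when it was retransmitted, redirected or dropped.
 */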
static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
                                          struct xdp_frame *frame,
                                          struct veth_xdp_tx_bq *bq,
                                          struct veth_stats *stats)
{
        struct xdp_frame orig_frame;
        struct bpf_prog *xdp_prog;

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (likely(xdp_prog)) {
                struct xdp_buff xdp;
                u32 act;

                xdp_convert_frame_to_buff(frame, &xdp);
                xdp.rxq = &rq->xdp_rxq;

                act = bpf_prog_run_xdp(xdp_prog, &xdp);

                switch (act) {
                case XDP_PASS:
                        if (xdp_update_frame_from_buff(&xdp, frame))
                                goto err_xdp;
                        break;
                case XDP_TX:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
                                trace_xdp_exception(rq->dev, xdp_prog, act);
                                frame = &orig_frame;
                                stats->rx_drops++;
                                goto err_xdp;
                        }
                        stats->xdp_tx++;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        orig_frame = *frame;
                        xdp.rxq->mem = frame->mem;
                        if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
                                frame = &orig_frame;
                                stats->rx_drops++;
                                goto err_xdp;
                        }
                        stats->xdp_redirect++;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
                        fallthrough;
                case XDP_ABORTED:
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                        fallthrough;
                case XDP_DROP:
                        stats->xdp_drops++;
                        goto err_xdp;
                }
        }
        rcu_read_unlock();

        return frame;
err_xdp:
        rcu_read_unlock();
        xdp_return_frame(frame);
xdp_xmit:
        return NULL;
}

/* The frames array contains at most VETH_XDP_BATCH entries */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
                                  int n_xdpf, struct veth_xdp_tx_bq *bq,
                                  struct veth_stats *stats)
{
        void *skbs[VETH_XDP_BATCH];
        int i;

        if (xdp_alloc_skb_bulk(skbs, n_xdpf,
                               GFP_ATOMIC | __GFP_ZERO) < 0) {
                for (i = 0; i < n_xdpf; i++)
                        xdp_return_frame(frames[i]);
                stats->rx_drops += n_xdpf;

                return;
        }

        for (i = 0; i < n_xdpf; i++) {
                struct sk_buff *skb = skbs[i];

                skb = __xdp_build_skb_from_frame(frames[i], skb,
                                                 rq->dev);
                if (!skb) {
                        xdp_return_frame(frames[i]);
                        stats->rx_drops++;
                        continue;
                }
                napi_gro_receive(&rq->xdp_napi, skb);
        }
}

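/*
 * Run the rx queue's XDP program on one skb consumed from the ring, i.e.
 * a packet queued by the peer's ndo_start_xmit. The skb data is copied
 * into a freshly allocated page when the skb is shared, its head is
 * locked, it is non-linear or it lacks XDP headroom, so the program can
 * safely write to the buffer.
 */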
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
                                        struct sk_buff *skb,
                                        struct veth_xdp_tx_bq *bq,
                                        struct veth_stats *stats)
{
        u32 pktlen, headroom, act, metalen, frame_sz;
        void *orig_data, *orig_data_end;
        struct bpf_prog *xdp_prog;
        int mac_len, delta, off;
        struct xdp_buff xdp;

        skb_prepare_for_gro(skb);

        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (unlikely(!xdp_prog)) {
                rcu_read_unlock();
                goto out;
        }

        mac_len = skb->data - skb_mac_header(skb);
        pktlen = skb->len + mac_len;
        headroom = skb_headroom(skb) - mac_len;

        if (skb_shared(skb) || skb_head_is_locked(skb) ||
            skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
                struct sk_buff *nskb;
                int size, head_off;
                void *head, *start;
                struct page *page;

                size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                if (size > PAGE_SIZE)
                        goto drop;

                page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
                if (!page)
                        goto drop;

                head = page_address(page);
                start = head + VETH_XDP_HEADROOM;
                if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
                        page_frag_free(head);
                        goto drop;
                }

                nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
                                      skb->len, PAGE_SIZE);
                if (!nskb) {
                        page_frag_free(head);
                        goto drop;
                }

                skb_copy_header(nskb, skb);
                head_off = skb_headroom(nskb) - skb_headroom(skb);
                skb_headers_offset_update(nskb, head_off);
                consume_skb(skb);
                skb = nskb;
        }

        /* The SKB "head" area always has tailroom for skb_shared_info */
        frame_sz = skb_end_pointer(skb) - skb->head;
        frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
        xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);

        orig_data = xdp.data;
        orig_data_end = xdp.data_end;

        act = bpf_prog_run_xdp(xdp_prog, &xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
                        trace_xdp_exception(rq->dev, xdp_prog, act);
                        stats->rx_drops++;
                        goto err_xdp;
                }
                stats->xdp_tx++;
                rcu_read_unlock();
                goto xdp_xmit;
        case XDP_REDIRECT:
                get_page(virt_to_page(xdp.data));
                consume_skb(skb);
                xdp.rxq->mem = rq->xdp_mem;
                if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
                        stats->rx_drops++;
                        goto err_xdp;
                }
                stats->xdp_redirect++;
                rcu_read_unlock();
                goto xdp_xmit;
        default:
                bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rq->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                stats->xdp_drops++;
                goto xdp_drop;
        }
        rcu_read_unlock();

        /* check if bpf_xdp_adjust_head was used */
        delta = orig_data - xdp.data;
        off = mac_len + delta;
        if (off > 0)
                __skb_push(skb, off);
        else if (off < 0)
                __skb_pull(skb, -off);
        skb->mac_header -= delta;

        /* check if bpf_xdp_adjust_tail was used */
        off = xdp.data_end - orig_data_end;
        if (off != 0)
                __skb_put(skb, off); /* positive on grow, negative on shrink */
        skb->protocol = eth_type_trans(skb, rq->dev);

        metalen = xdp.data - xdp.data_meta;
        if (metalen)
                skb_metadata_set(skb, metalen);
out:
        return skb;
drop:
        stats->rx_drops++;
xdp_drop:
        rcu_read_unlock();
        kfree_skb(skb);
        return NULL;
err_xdp:
        rcu_read_unlock();
        page_frag_free(xdp.data);
xdp_xmit:
        return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget,
                        struct veth_xdp_tx_bq *bq,
                        struct veth_stats *stats)
{
        int i, done = 0, n_xdpf = 0;
        void *xdpf[VETH_XDP_BATCH];

        for (i = 0; i < budget; i++) {
                void *ptr = __ptr_ring_consume(&rq->xdp_ring);

                if (!ptr)
                        break;

                if (veth_is_xdp_frame(ptr)) {
                        /* ndo_xdp_xmit */
                        struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

                        stats->xdp_bytes += frame->len;
                        frame = veth_xdp_rcv_one(rq, frame, bq, stats);
                        if (frame) {
                                /* XDP_PASS */
                                xdpf[n_xdpf++] = frame;
                                if (n_xdpf == VETH_XDP_BATCH) {
                                        veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
                                                              bq, stats);
                                        n_xdpf = 0;
                                }
                        }
                } else {
                        /* ndo_start_xmit */
                        struct sk_buff *skb = ptr;

                        stats->xdp_bytes += skb->len;
                        skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
                        if (skb) {
                                if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
                                        netif_receive_skb(skb);
                                else
                                        napi_gro_receive(&rq->xdp_napi, skb);
                        }
                }
                done++;
        }

        if (n_xdpf)
                veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

        u64_stats_update_begin(&rq->stats.syncp);
        rq->stats.vs.xdp_redirect += stats->xdp_redirect;
        rq->stats.vs.xdp_bytes += stats->xdp_bytes;
        rq->stats.vs.xdp_drops += stats->xdp_drops;
        rq->stats.vs.rx_drops += stats->rx_drops;
        rq->stats.vs.xdp_packets += done;
        u64_stats_update_end(&rq->stats.syncp);

        return done;
}

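/*
 * NAPI poll handler: consume up to budget entries from the rx ring, then
 * flush any bulked XDP_TX frames and any pending redirects before
 * returning.
 */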
static int veth_poll(struct napi_struct *napi, int budget)
{
        struct veth_rq *rq =
                container_of(napi, struct veth_rq, xdp_napi);
        struct veth_stats stats = {};
        struct veth_xdp_tx_bq bq;
        int done;

        bq.count = 0;

        xdp_set_return_frame_no_direct();
        done = veth_xdp_rcv(rq, budget, &bq, &stats);

        if (done < budget && napi_complete_done(napi, done)) {
                /* Write rx_notify_masked before reading ptr_ring */
                smp_store_mb(rq->rx_notify_masked, false);
                if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
                        if (napi_schedule_prep(&rq->xdp_napi)) {
                                WRITE_ONCE(rq->rx_notify_masked, true);
                                __napi_schedule(&rq->xdp_napi);
                        }
                }
        }

        if (stats.xdp_tx > 0)
                veth_xdp_flush(rq, &bq);
        if (stats.xdp_redirect > 0)
                xdp_do_flush();
        xdp_clear_return_frame_no_direct();

        return done;
}

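/*
 * Bring-up order matters below: every ring in the range is initialized
 * before any NAPI instance is enabled, so a scheduled NAPI can never
 * observe an uninitialized xdp_ring.
 */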
static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        for (i = start; i < end; i++) {
                struct veth_rq *rq = &priv->rq[i];

                err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
                if (err)
                        goto err_xdp_ring;
        }

        for (i = start; i < end; i++) {
                struct veth_rq *rq = &priv->rq[i];

                napi_enable(&rq->xdp_napi);
                rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
        }

        return 0;

err_xdp_ring:
        for (i--; i >= start; i--)
                ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

        return err;
}

static int __veth_napi_enable(struct net_device *dev)
{
        return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}

static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = start; i < end; i++) {
                struct veth_rq *rq = &priv->rq[i];

                rcu_assign_pointer(priv->rq[i].napi, NULL);
                napi_disable(&rq->xdp_napi);
                __netif_napi_del(&rq->xdp_napi);
        }
        synchronize_net();

        for (i = start; i < end; i++) {
                struct veth_rq *rq = &priv->rq[i];

                rq->rx_notify_masked = false;
                ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
        }
}

static void veth_napi_del(struct net_device *dev)
{
        veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
}

static bool veth_gro_requested(const struct net_device *dev)
{
        return !!(dev->wanted_features & NETIF_F_GRO);
}

static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
                                 bool napi_already_on)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        for (i = start; i < end; i++) {
                struct veth_rq *rq = &priv->rq[i];

                if (!napi_already_on)
                        netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
                err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
                if (err < 0)
                        goto err_rxq_reg;

                err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                                 MEM_TYPE_PAGE_SHARED,
                                                 NULL);
                if (err < 0)
                        goto err_reg_mem;

                /* Save original mem info as it can be overwritten */
                rq->xdp_mem = rq->xdp_rxq.mem;
        }
        return 0;

err_reg_mem:
        xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
        for (i--; i >= start; i--) {
                struct veth_rq *rq = &priv->rq[i];

                xdp_rxq_info_unreg(&rq->xdp_rxq);
                if (!napi_already_on)
                        netif_napi_del(&rq->xdp_napi);
        }

        return err;
}

static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
                                   bool delete_napi)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = start; i < end; i++) {
                struct veth_rq *rq = &priv->rq[i];

                rq->xdp_rxq.mem = rq->xdp_mem;
                xdp_rxq_info_unreg(&rq->xdp_rxq);

                if (delete_napi)
                        netif_napi_del(&rq->xdp_napi);
        }
}

static int veth_enable_xdp(struct net_device *dev)
{
        bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
                err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
                if (err)
                        return err;

                if (!napi_already_on) {
                        err = __veth_napi_enable(dev);
                        if (err) {
                                veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
                                return err;
                        }

                        if (!veth_gro_requested(dev)) {
                                /* user-space did not require GRO, but adding
                                 * XDP is supposed to get GRO working
                                 */
                                dev->features |= NETIF_F_GRO;
                                netdev_features_change(dev);
                        }
                }
        }

        for (i = 0; i < dev->real_num_rx_queues; i++) {
                rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
                rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
        }

        return 0;
}

static void veth_disable_xdp(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < dev->real_num_rx_queues; i++)
                rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);

        if (!netif_running(dev) || !veth_gro_requested(dev)) {
                veth_napi_del(dev);

                /* if user-space did not require GRO, since adding XDP
                 * enabled it, clear it now
                 */
                if (!veth_gro_requested(dev) && netif_running(dev)) {
                        dev->features &= ~NETIF_F_GRO;
                        netdev_features_change(dev);
                }
        }

        veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}

static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err, i;

        for (i = start; i < end; i++) {
                struct veth_rq *rq = &priv->rq[i];

                netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
        }

        err = __veth_napi_enable_range(dev, start, end);
        if (err) {
                for (i = start; i < end; i++) {
                        struct veth_rq *rq = &priv->rq[i];

                        netif_napi_del(&rq->xdp_napi);
                }
                return err;
        }
        return err;
}

static int veth_napi_enable(struct net_device *dev)
{
        return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
}

static void veth_disable_range_safe(struct net_device *dev, int start, int end)
{
        struct veth_priv *priv = netdev_priv(dev);

        if (start >= end)
                return;

        if (priv->_xdp_prog) {
                veth_napi_del_range(dev, start, end);
                veth_disable_xdp_range(dev, start, end, false);
        } else if (veth_gro_requested(dev)) {
                veth_napi_del_range(dev, start, end);
        }
}

static int veth_enable_range_safe(struct net_device *dev, int start, int end)
{
        struct veth_priv *priv = netdev_priv(dev);
        int err;

        if (start >= end)
                return 0;

        if (priv->_xdp_prog) {
                /* these channels are freshly initialized, napi is not on
                 * there even when GRO is requested
                 */
                err = veth_enable_xdp_range(dev, start, end, false);
                if (err)
                        return err;

                err = __veth_napi_enable_range(dev, start, end);
                if (err) {
                        /* on error always delete the newly added napis */
                        veth_disable_xdp_range(dev, start, end, true);
                        return err;
                }
        } else if (veth_gro_requested(dev)) {
                return veth_napi_enable_range(dev, start, end);
        }
        return 0;
}

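/*
 * Channel resize strategy: when the device is running, new rx resources
 * are allocated before the queue counts are committed, and the rollback
 * path releases whichever range ends up unused, so a failed resize leaves
 * the device in its original state whenever possible.
 */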
static int veth_set_channels(struct net_device *dev,
                             struct ethtool_channels *ch)
{
        struct veth_priv *priv = netdev_priv(dev);
        unsigned int old_rx_count, new_rx_count;
        struct veth_priv *peer_priv;
        struct net_device *peer;
        int err;

        /* sanity check. Upper bounds are already enforced by the caller */
        if (!ch->rx_count || !ch->tx_count)
                return -EINVAL;

        /* avoid breaking XDP, if that is enabled */
        peer = rtnl_dereference(priv->peer);
        peer_priv = peer ? netdev_priv(peer) : NULL;
        if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
                return -EINVAL;

        if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
                return -EINVAL;

        old_rx_count = dev->real_num_rx_queues;
        new_rx_count = ch->rx_count;
        if (netif_running(dev)) {
                /* turn device off */
                netif_carrier_off(dev);
                if (peer)
                        netif_carrier_off(peer);

                /* try to allocate new resources, as needed */
                err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
                if (err)
                        goto out;
        }

        err = netif_set_real_num_rx_queues(dev, ch->rx_count);
        if (err)
                goto revert;

        err = netif_set_real_num_tx_queues(dev, ch->tx_count);
        if (err) {
                int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);

                /* this error condition could happen only if rx and tx change
                 * in opposite directions (e.g. tx nr rises, rx nr decreases)
                 * and we can't do anything to fully restore the original
                 * status
                 */
                if (err2)
                        pr_warn("Can't restore rx queues config %d -> %d %d",
                                new_rx_count, old_rx_count, err2);
                else
                        goto revert;
        }

out:
        if (netif_running(dev)) {
                /* note that we need to swap the arguments WRT the enable part
                 * to identify the range we have to disable
                 */
                veth_disable_range_safe(dev, new_rx_count, old_rx_count);
                netif_carrier_on(dev);
                if (peer)
                        netif_carrier_on(peer);
        }
        return err;

revert:
        new_rx_count = old_rx_count;
        old_rx_count = ch->rx_count;
        goto out;
}
1258
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001259static int veth_open(struct net_device *dev)
1260{
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001261 struct veth_priv *priv = netdev_priv(dev);
1262 struct net_device *peer = rtnl_dereference(priv->peer);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001263 int err;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001264
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001265 if (!peer)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001266 return -ENOTCONN;
1267
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001268 if (priv->_xdp_prog) {
1269 err = veth_enable_xdp(dev);
1270 if (err)
1271 return err;
Paolo Abenid3256ef2021-04-09 13:04:38 +02001272 } else if (veth_gro_requested(dev)) {
1273 err = veth_napi_enable(dev);
1274 if (err)
1275 return err;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001276 }
1277
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001278 if (peer->flags & IFF_UP) {
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001279 netif_carrier_on(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001280 netif_carrier_on(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001281 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001282
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001283 return 0;
1284}
1285
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001286static int veth_close(struct net_device *dev)
1287{
1288 struct veth_priv *priv = netdev_priv(dev);
Eric Dumazet2efd32e2013-01-10 08:32:45 +00001289 struct net_device *peer = rtnl_dereference(priv->peer);
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001290
1291 netif_carrier_off(dev);
Eric Dumazet2efd32e2013-01-10 08:32:45 +00001292 if (peer)
1293 netif_carrier_off(peer);
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001294
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001295 if (priv->_xdp_prog)
1296 veth_disable_xdp(dev);
Paolo Abenid3256ef2021-04-09 13:04:38 +02001297 else if (veth_gro_requested(dev))
1298 veth_napi_del(dev);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001299
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001300 return 0;
1301}
1302
Jarod Wilson91572082016-10-20 13:55:20 -04001303static int is_valid_veth_mtu(int mtu)
Eric Biederman38d40812009-03-03 23:36:04 -08001304{
Jarod Wilson91572082016-10-20 13:55:20 -04001305 return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
Eric Biederman38d40812009-03-03 23:36:04 -08001306}
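
/* Editor's note: ETH_MIN_MTU is 68 and ETH_MAX_MTU is 65535 in the uapi
 * headers, so an IFLA_MTU attribute of, say, 9000 passes this check at
 * link-creation time, while 0 or 70000 is rejected with -EINVAL.
 */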
1307
Toshiaki Makita7797b932018-08-15 17:07:29 +09001308static int veth_alloc_queues(struct net_device *dev)
1309{
1310 struct veth_priv *priv = netdev_priv(dev);
1311 int i;
1312
1313 priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
1314 if (!priv->rq)
1315 return -ENOMEM;
1316
Toshiaki Makita4195e542018-10-11 18:36:49 +09001317 for (i = 0; i < dev->num_rx_queues; i++) {
Toshiaki Makita7797b932018-08-15 17:07:29 +09001318 priv->rq[i].dev = dev;
Toshiaki Makita4195e542018-10-11 18:36:49 +09001319 u64_stats_init(&priv->rq[i].stats.syncp);
1320 }
Toshiaki Makita7797b932018-08-15 17:07:29 +09001321
1322 return 0;
1323}
1324
1325static void veth_free_queues(struct net_device *dev)
1326{
1327 struct veth_priv *priv = netdev_priv(dev);
1328
1329 kfree(priv->rq);
1330}
1331
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001332static int veth_dev_init(struct net_device *dev)
1333{
Toshiaki Makita7797b932018-08-15 17:07:29 +09001334 int err;
1335
Li RongQing14d73412018-09-17 18:46:55 +08001336 dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
1337 if (!dev->lstats)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001338 return -ENOMEM;
Toshiaki Makita7797b932018-08-15 17:07:29 +09001339
1340 err = veth_alloc_queues(dev);
1341 if (err) {
Li RongQing14d73412018-09-17 18:46:55 +08001342 free_percpu(dev->lstats);
Toshiaki Makita7797b932018-08-15 17:07:29 +09001343 return err;
1344 }
1345
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001346 return 0;
1347}
1348
David S. Miller11687a12009-06-25 02:45:42 -07001349static void veth_dev_free(struct net_device *dev)
1350{
Toshiaki Makita7797b932018-08-15 17:07:29 +09001351 veth_free_queues(dev);
Li RongQing14d73412018-09-17 18:46:55 +08001352 free_percpu(dev->lstats);
David S. Miller11687a12009-06-25 02:45:42 -07001353}
1354
WANG Congbb446c12014-06-23 15:36:02 -07001355#ifdef CONFIG_NET_POLL_CONTROLLER
1356static void veth_poll_controller(struct net_device *dev)
1357{
1358	/* veth only receives frames when its peer sends one.
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001359	 * Since that has nothing to do with disabling irqs, we are guaranteed
WANG Congbb446c12014-06-23 15:36:02 -07001360	 * never to have pending data when we poll for it, so
1361	 * there is nothing to do here.
1362	 *
1363	 * We still need this callback so that netpoll recognizes us as an
1364	 * interface that supports polling, which enables bridge devices in
1365	 * virt setups to keep using netconsole.
1366	 */
1367}
1368#endif /* CONFIG_NET_POLL_CONTROLLER */
1369
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001370static int veth_get_iflink(const struct net_device *dev)
1371{
1372 struct veth_priv *priv = netdev_priv(dev);
1373 struct net_device *peer;
1374 int iflink;
1375
1376 rcu_read_lock();
1377 peer = rcu_dereference(priv->peer);
1378 iflink = peer ? peer->ifindex : 0;
1379 rcu_read_unlock();
1380
1381 return iflink;
1382}
1383
Toshiaki Makitadc224822018-08-03 16:58:11 +09001384static netdev_features_t veth_fix_features(struct net_device *dev,
1385 netdev_features_t features)
1386{
1387 struct veth_priv *priv = netdev_priv(dev);
1388 struct net_device *peer;
1389
1390 peer = rtnl_dereference(priv->peer);
1391 if (peer) {
1392 struct veth_priv *peer_priv = netdev_priv(peer);
1393
1394 if (peer_priv->_xdp_prog)
1395 features &= ~NETIF_F_GSO_SOFTWARE;
1396 }
Paolo Abenid3256ef2021-04-09 13:04:38 +02001397 if (priv->_xdp_prog)
1398 features |= NETIF_F_GRO;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001399
1400 return features;
1401}
1402
Paolo Abenid3256ef2021-04-09 13:04:38 +02001403static int veth_set_features(struct net_device *dev,
1404 netdev_features_t features)
1405{
1406 netdev_features_t changed = features ^ dev->features;
1407 struct veth_priv *priv = netdev_priv(dev);
1408 int err;
1409
1410 if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
1411 return 0;
1412
1413 if (features & NETIF_F_GRO) {
1414 err = veth_napi_enable(dev);
1415 if (err)
1416 return err;
1417 } else {
1418 veth_napi_del(dev);
1419 }
1420 return 0;
1421}
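
/* Example (editor's sketch): toggling GRO from user space with
 *
 *	ethtool -K veth0 gro on
 *
 * ends up here via netdev_update_features() with NETIF_F_GRO newly set;
 * on a running device without an XDP program this enables the NAPI
 * instances on the fly, and `gro off` tears them down again.
 */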
1422
Paolo Abeni163e5292016-02-26 10:45:41 +01001423static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
1424{
1425 struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1426 struct net_device *peer;
1427
1428 if (new_hr < 0)
1429 new_hr = 0;
1430
1431 rcu_read_lock();
1432 peer = rcu_dereference(priv->peer);
1433 if (unlikely(!peer))
1434 goto out;
1435
1436 peer_priv = netdev_priv(peer);
1437 priv->requested_headroom = new_hr;
1438 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1439 dev->needed_headroom = new_hr;
1440 peer->needed_headroom = new_hr;
1441
1442out:
1443 rcu_read_unlock();
1444}
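
/* Worked example (editor's note): if this side has requested 128 bytes
 * of headroom and the peer has requested 192, both devices end up with
 * needed_headroom == 192; the max() above keeps the pair in sync so
 * either direction can satisfy the larger reservation.
 */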
1445
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001446static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1447 struct netlink_ext_ack *extack)
1448{
1449 struct veth_priv *priv = netdev_priv(dev);
1450 struct bpf_prog *old_prog;
1451 struct net_device *peer;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001452 unsigned int max_mtu;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001453 int err;
1454
1455 old_prog = priv->_xdp_prog;
1456 priv->_xdp_prog = prog;
1457 peer = rtnl_dereference(priv->peer);
1458
1459 if (prog) {
1460 if (!peer) {
1461 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1462 err = -ENOTCONN;
1463 goto err;
1464 }
1465
Toshiaki Makitadc224822018-08-03 16:58:11 +09001466 max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
1467 peer->hard_header_len -
1468 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1469 if (peer->mtu > max_mtu) {
1470 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1471 err = -ERANGE;
1472 goto err;
1473 }
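		/* Worked example (editor's note, assuming 4 KiB pages, x86-64
		 * struct sizes and NET_IP_ALIGN == 0): 4096 - 256 (headroom) -
		 * 14 (Ethernet header) - 320 (aligned skb_shared_info) gives a
		 * max_mtu of 3506, so a jumbo-MTU peer must shrink its MTU
		 * before XDP can be attached.
		 */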
1474
Toshiaki Makita638264d2018-08-03 16:58:18 +09001475 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1476 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1477 err = -ENOSPC;
1478 goto err;
1479 }
1480
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001481 if (dev->flags & IFF_UP) {
1482 err = veth_enable_xdp(dev);
1483 if (err) {
1484 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1485 goto err;
1486 }
1487 }
Toshiaki Makitadc224822018-08-03 16:58:11 +09001488
1489 if (!old_prog) {
1490 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1491 peer->max_mtu = max_mtu;
1492 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001493 }
1494
1495 if (old_prog) {
Toshiaki Makitadc224822018-08-03 16:58:11 +09001496 if (!prog) {
1497 if (dev->flags & IFF_UP)
1498 veth_disable_xdp(dev);
1499
1500 if (peer) {
1501 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1502 peer->max_mtu = ETH_MAX_MTU;
1503 }
1504 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001505 bpf_prog_put(old_prog);
1506 }
1507
Toshiaki Makitadc224822018-08-03 16:58:11 +09001508 if ((!!old_prog ^ !!prog) && peer)
1509 netdev_update_features(peer);
1510
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001511 return 0;
1512err:
1513 priv->_xdp_prog = old_prog;
1514
1515 return err;
1516}
1517
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001518static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1519{
1520 switch (xdp->command) {
1521 case XDP_SETUP_PROG:
1522 return veth_xdp_set(dev, xdp->prog, xdp->extack);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001523 default:
1524 return -EINVAL;
1525 }
1526}
1527
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001528static const struct net_device_ops veth_netdev_ops = {
Daniel Lezcanoee923622009-02-22 00:04:45 -08001529 .ndo_init = veth_dev_init,
1530 .ndo_open = veth_open,
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001531 .ndo_stop = veth_close,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001532 .ndo_start_xmit = veth_xmit,
stephen hemminger6311cc42011-06-08 14:53:59 +00001533 .ndo_get_stats64 = veth_get_stats64,
Gao feng5c70ef82013-10-04 16:52:24 +08001534 .ndo_set_rx_mode = veth_set_multicast_list,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001535 .ndo_set_mac_address = eth_mac_addr,
WANG Congbb446c12014-06-23 15:36:02 -07001536#ifdef CONFIG_NET_POLL_CONTROLLER
1537 .ndo_poll_controller = veth_poll_controller,
1538#endif
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001539 .ndo_get_iflink = veth_get_iflink,
Toshiaki Makitadc224822018-08-03 16:58:11 +09001540 .ndo_fix_features = veth_fix_features,
Paolo Abenid3256ef2021-04-09 13:04:38 +02001541 .ndo_set_features = veth_set_features,
Toshiaki Makita1a04a822015-07-31 15:03:25 +09001542 .ndo_features_check = passthru_features_check,
Paolo Abeni163e5292016-02-26 10:45:41 +01001543 .ndo_set_rx_headroom = veth_set_rx_headroom,
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001544 .ndo_bpf = veth_xdp,
Lorenzo Bianconi9152cff2020-03-19 17:41:28 +01001545 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
Daniel Borkmann9aa12062020-10-11 01:40:02 +02001546 .ndo_get_peer_dev = veth_peer_dev,
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001547};
1548
Alexander Duyck732912d72016-04-19 14:02:26 -04001549#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
Xin Longc80fafb2016-08-25 13:21:49 +08001550 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
Alexander Duyck732912d72016-04-19 14:02:26 -04001551 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
Patrick McHardy28d2b132013-04-19 02:04:32 +00001552 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1553 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
Eric Dumazet80933152012-12-29 16:26:10 +00001554
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001555static void veth_setup(struct net_device *dev)
1556{
1557 ether_setup(dev);
1558
Neil Horman550fd082011-07-26 06:05:38 +00001559 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
Hannes Frederic Sowa23ea5a92012-10-30 16:22:01 +00001560 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
Phil Sutter02f01ec2015-08-18 10:30:29 +02001561 dev->priv_flags |= IFF_NO_QUEUE;
Paolo Abeni163e5292016-02-26 10:45:41 +01001562 dev->priv_flags |= IFF_PHONY_HEADROOM;
Neil Horman550fd082011-07-26 06:05:38 +00001563
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001564 dev->netdev_ops = &veth_netdev_ops;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001565 dev->ethtool_ops = &veth_ethtool_ops;
1566 dev->features |= NETIF_F_LLTX;
Eric Dumazet80933152012-12-29 16:26:10 +00001567 dev->features |= VETH_FEATURES;
Toshiaki Makita8d0d21f2014-02-18 21:20:08 +09001568 dev->vlan_features = dev->features &
Vlad Yasevich3f8c7072014-03-27 22:14:48 -04001569 ~(NETIF_F_HW_VLAN_CTAG_TX |
1570 NETIF_F_HW_VLAN_STAG_TX |
1571 NETIF_F_HW_VLAN_CTAG_RX |
1572 NETIF_F_HW_VLAN_STAG_RX);
David S. Millercf124db2017-05-08 12:52:56 -04001573 dev->needs_free_netdev = true;
1574 dev->priv_destructor = veth_dev_free;
Jarod Wilson91572082016-10-20 13:55:20 -04001575 dev->max_mtu = ETH_MAX_MTU;
Michał Mirosława2c725f2011-03-31 01:01:35 +00001576
Eric Dumazet80933152012-12-29 16:26:10 +00001577 dev->hw_features = VETH_FEATURES;
Eric Dumazet82d81892013-10-25 18:25:03 -07001578 dev->hw_enc_features = VETH_FEATURES;
David Ahern607fca92016-08-24 20:10:45 -07001579 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001580}
1581
1582/*
1583 * netlink interface
1584 */
1585
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02001586static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1587 struct netlink_ext_ack *extack)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001588{
1589 if (tb[IFLA_ADDRESS]) {
1590 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1591 return -EINVAL;
1592 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1593 return -EADDRNOTAVAIL;
1594 }
Eric Biederman38d40812009-03-03 23:36:04 -08001595 if (tb[IFLA_MTU]) {
1596 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1597 return -EINVAL;
1598 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001599 return 0;
1600}
1601
1602static struct rtnl_link_ops veth_link_ops;
1603
Paolo Abenid3256ef2021-04-09 13:04:38 +02001604static void veth_disable_gro(struct net_device *dev)
1605{
1606 dev->features &= ~NETIF_F_GRO;
1607 dev->wanted_features &= ~NETIF_F_GRO;
1608 netdev_update_features(dev);
1609}
1610
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001611static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
1612{
1613 int err;
1614
1615 if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
1616 err = netif_set_real_num_tx_queues(dev, 1);
1617 if (err)
1618 return err;
1619 }
1620 if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
1621 err = netif_set_real_num_rx_queues(dev, 1);
1622 if (err)
1623 return err;
1624 }
1625 return 0;
1626}
1627
Eric W. Biederman81adee42009-11-08 00:53:51 -08001628static int veth_newlink(struct net *src_net, struct net_device *dev,
Matthias Schiffer7a3f4a12017-06-25 23:55:59 +02001629 struct nlattr *tb[], struct nlattr *data[],
1630 struct netlink_ext_ack *extack)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001631{
Toshiaki Makita7797b932018-08-15 17:07:29 +09001632 int err;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001633 struct net_device *peer;
1634 struct veth_priv *priv;
1635 char ifname[IFNAMSIZ];
1636 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
Tom Gundersen55177502014-07-14 16:37:25 +02001637 unsigned char name_assign_type;
Patrick McHardy3729d502010-02-26 06:34:54 +00001638 struct ifinfomsg *ifmp;
Eric W. Biederman81adee42009-11-08 00:53:51 -08001639 struct net *net;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001640
1641 /*
1642 * create and register peer first
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001643 */
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001644 if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1645 struct nlattr *nla_peer;
1646
1647 nla_peer = data[VETH_INFO_PEER];
Patrick McHardy3729d502010-02-26 06:34:54 +00001648 ifmp = nla_data(nla_peer);
Jiri Pirkof7b12602014-02-18 20:53:18 +01001649 err = rtnl_nla_parse_ifla(peer_tb,
1650 nla_data(nla_peer) + sizeof(struct ifinfomsg),
Johannes Bergfceb6432017-04-12 14:34:07 +02001651 nla_len(nla_peer) - sizeof(struct ifinfomsg),
1652 NULL);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001653 if (err < 0)
1654 return err;
1655
Matthias Schiffera8b8a8892017-06-25 23:56:01 +02001656 err = veth_validate(peer_tb, NULL, extack);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001657 if (err < 0)
1658 return err;
1659
1660 tbp = peer_tb;
Patrick McHardy3729d502010-02-26 06:34:54 +00001661 } else {
1662 ifmp = NULL;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001663 tbp = tb;
Patrick McHardy3729d502010-02-26 06:34:54 +00001664 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001665
Serhey Popovych191cdb32017-06-21 12:12:24 +03001666 if (ifmp && tbp[IFLA_IFNAME]) {
Francis Laniel872f6902020-11-15 18:08:06 +01001667 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
Tom Gundersen55177502014-07-14 16:37:25 +02001668 name_assign_type = NET_NAME_USER;
1669 } else {
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001670 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
Tom Gundersen55177502014-07-14 16:37:25 +02001671 name_assign_type = NET_NAME_ENUM;
1672 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001673
Eric W. Biederman81adee42009-11-08 00:53:51 -08001674 net = rtnl_link_get_net(src_net, tbp);
1675 if (IS_ERR(net))
1676 return PTR_ERR(net);
1677
Tom Gundersen55177502014-07-14 16:37:25 +02001678 peer = rtnl_create_link(net, ifname, name_assign_type,
David Ahernd0522f12018-11-06 12:51:14 -08001679 &veth_link_ops, tbp, extack);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001680 if (IS_ERR(peer)) {
1681 put_net(net);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001682 return PTR_ERR(peer);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001683 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001684
Serhey Popovych191cdb32017-06-21 12:12:24 +03001685 if (!ifmp || !tbp[IFLA_ADDRESS])
Danny Kukawkaf2cedb62012-02-15 06:45:39 +00001686 eth_hw_addr_random(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001687
Pavel Emelyanove6f8f1a2012-08-08 21:53:03 +00001688 if (ifmp && (dev->ifindex != 0))
1689 peer->ifindex = ifmp->ifi_index;
1690
Eric Dumazet4b66d212021-11-19 07:43:31 -08001691 netif_set_gso_max_size(peer, dev->gso_max_size);
Eric Dumazet6d872df2021-11-19 07:43:32 -08001692 netif_set_gso_max_segs(peer, dev->gso_max_segs);
Stephen Hemminger72d249552017-12-07 15:40:20 -08001693
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001694 err = register_netdevice(peer);
Eric W. Biederman81adee42009-11-08 00:53:51 -08001695 put_net(net);
1696 net = NULL;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001697 if (err < 0)
1698 goto err_register_peer;
1699
Paolo Abenid3256ef2021-04-09 13:04:38 +02001700 /* keep GRO disabled by default to be consistent with the established
1701 * veth behavior
1702 */
1703 veth_disable_gro(peer);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001704 netif_carrier_off(peer);
1705
Patrick McHardy3729d502010-02-26 06:34:54 +00001706 err = rtnl_configure_link(peer, ifmp);
1707 if (err < 0)
1708 goto err_configure_peer;
1709
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001710 /*
1711 * register dev last
1712 *
1713	 * note that, since we've registered a new device, the dev's name
1714	 * should be re-allocated
1715 */
1716
1717 if (tb[IFLA_ADDRESS] == NULL)
Danny Kukawkaf2cedb62012-02-15 06:45:39 +00001718 eth_hw_addr_random(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001719
Jiri Pirko6c8c4442011-04-30 01:28:17 +00001720 if (tb[IFLA_IFNAME])
Francis Laniel872f6902020-11-15 18:08:06 +01001721 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
Jiri Pirko6c8c4442011-04-30 01:28:17 +00001722 else
1723 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1724
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001725 err = register_netdevice(dev);
1726 if (err < 0)
1727 goto err_register_dev;
1728
1729 netif_carrier_off(dev);
1730
1731 /*
1732	 * tie the devices together
1733 */
1734
1735 priv = netdev_priv(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001736 rcu_assign_pointer(priv->peer, peer);
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001737 err = veth_init_queues(dev, tb);
1738 if (err)
1739 goto err_queues;
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001740
1741 priv = netdev_priv(peer);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001742 rcu_assign_pointer(priv->peer, dev);
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001743 err = veth_init_queues(peer, tb);
1744 if (err)
1745 goto err_queues;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001746
Paolo Abenid3256ef2021-04-09 13:04:38 +02001747 veth_disable_gro(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001748 return 0;
1749
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001750err_queues:
1751 unregister_netdevice(dev);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001752err_register_dev:
1753 /* nothing to do */
Patrick McHardy3729d502010-02-26 06:34:54 +00001754err_configure_peer:
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001755 unregister_netdevice(peer);
1756 return err;
1757
1758err_register_peer:
1759 free_netdev(peer);
1760 return err;
1761}
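
/* Usage example (editor's note): the common way to exercise
 * veth_newlink() is iproute2's
 *
 *	ip link add veth0 type veth peer name veth1
 *
 * which creates both endpoints in a single netlink request; the peer's
 * attributes travel in the VETH_INFO_PEER nest parsed above.
 */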
1762
Eric Dumazet23289a32009-10-27 07:06:36 +00001763static void veth_dellink(struct net_device *dev, struct list_head *head)
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001764{
1765 struct veth_priv *priv;
1766 struct net_device *peer;
1767
1768 priv = netdev_priv(dev);
Eric Dumazetd0e2c552013-01-04 15:42:40 +00001769 peer = rtnl_dereference(priv->peer);
1770
1771 /* Note : dellink() is called from default_device_exit_batch(),
1772 * before a rcu_synchronize() point. The devices are guaranteed
1773 * not being freed before one RCU grace period.
1774 */
1775 RCU_INIT_POINTER(priv->peer, NULL);
Eric Dumazet24540532009-10-30 01:00:27 -07001776 unregister_netdevice_queue(dev, head);
Eric Dumazetf45a5c22013-02-08 20:10:49 +00001777
1778 if (peer) {
1779 priv = netdev_priv(peer);
1780 RCU_INIT_POINTER(priv->peer, NULL);
1781 unregister_netdevice_queue(peer, head);
1782 }
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001783}
1784
Thomas Graf23711432012-02-15 04:09:46 +00001785static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1786 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1787};
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001788
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +01001789static struct net *veth_get_link_net(const struct net_device *dev)
1790{
1791 struct veth_priv *priv = netdev_priv(dev);
1792 struct net_device *peer = rtnl_dereference(priv->peer);
1793
1794 return peer ? dev_net(peer) : dev_net(dev);
1795}
1796
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001797static unsigned int veth_get_num_queues(void)
1798{
1799 /* enforce the same queue limit as rtnl_create_link */
1800 int queues = num_possible_cpus();
1801
1802 if (queues > 4096)
1803 queues = 4096;
1804 return queues;
1805}
1806
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001807static struct rtnl_link_ops veth_link_ops = {
1808 .kind = DRV_NAME,
1809 .priv_size = sizeof(struct veth_priv),
1810 .setup = veth_setup,
1811 .validate = veth_validate,
1812 .newlink = veth_newlink,
1813 .dellink = veth_dellink,
1814 .policy = veth_policy,
1815 .maxtype = VETH_INFO_MAX,
Nicolas Dichtele5f4e7b2015-01-20 15:15:46 +01001816 .get_link_net = veth_get_link_net,
Paolo Abeni9d3684c2021-07-20 10:41:51 +02001817 .get_num_tx_queues = veth_get_num_queues,
1818 .get_num_rx_queues = veth_get_num_queues,
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001819};
1820
1821/*
1822 * init/fini
1823 */
1824
1825static __init int veth_init(void)
1826{
1827 return rtnl_link_register(&veth_link_ops);
1828}
1829
1830static __exit void veth_exit(void)
1831{
Patrick McHardy68365452008-01-20 17:25:14 -08001832 rtnl_link_unregister(&veth_link_ops);
Pavel Emelyanove314dbd2007-09-25 16:14:46 -07001833}
1834
1835module_init(veth_init);
1836module_exit(veth_exit);
1837
1838MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1839MODULE_LICENSE("GPL v2");
1840MODULE_ALIAS_RTNL_LINK(DRV_NAME);