// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16

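/* Per-queue counters for the XDP fast path. Updates are protected by the
 * u64_stats_sync in struct veth_rq_stats below; the values are exported
 * both through ethtool -S and, in aggregate, through ndo_get_stats64.
 */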
struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "rx_queue_%u_%.18s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < dev->real_num_tx_queues; i++) {
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "tx_queue_%u_%.18s",
					 i, veth_tq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
	default:
		return -EOPNOTSUPP;
	}
}

static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}

	if (!peer)
		return;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
	}
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/* general routines */

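/* A single ptr_ring per rx queue carries two kinds of entries: sk_buffs
 * queued by veth_xmit() and xdp_frames queued by the peer's XDP_TX or
 * ndo_xdp_xmit path. Both pointer types are at least word-aligned, so
 * bit 0 (VETH_XDP_FLAG) is free to tag xdp_frames and tell the two
 * apart on the consumer side.
 */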
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}

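/* Transmit path: a veth device has no real hardware, so transmitting is
 * just handing the skb to the peer. If the peer has an XDP program
 * attached, the skb is queued on the peer's per-queue ptr_ring and
 * processed from NAPI context; otherwise it goes through netif_rx().
 */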
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool rcv_xdp = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];
		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
		if (rcv_xdp)
			skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
		if (!rcv_xdp)
			dev_lstats_add(dev, length);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (rcv_xdp)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
	struct veth_priv *priv = netdev_priv(dev);

	dev_lstats_read(dev, packets, bytes);
	return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}

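/* Everything this device transmits is received by its peer, so the XDP
 * counters accumulated on a device's rx queues show up as tx statistics
 * of the peer (and vice versa) when filling in rtnl_link_stats64 below.
 */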
static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;
	u64 packets, bytes;

	tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
	tot->tx_bytes = bytes;
	tot->tx_packets = packets;

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		veth_stats_tx(peer, &packets, &bytes);
		tot->rx_bytes += bytes;
		tot->rx_packets += packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

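/* Pick a peer rx queue for the XDP xmit paths. There is no flow hash
 * here; the current CPU is simply mapped onto the peer's real rx queue
 * range.
 */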
static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}

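/* Common helper for ndo_xdp_xmit (ndo_xmit == true) and the internal
 * XDP_TX bulk flush. Frames are pushed directly onto the peer's rx
 * ptr_ring, which only exists while the peer has an XDP program
 * attached and is up; oversized frames and ring-full conditions count
 * as drops. The xmit statistics land on the peer's rq, hence the
 * "peer_tq" counter names.
 */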
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, drops = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
	 * side. This means an XDP program is loaded on the peer and the peer
	 * device is up.
	 */
	if (!rcu_access_pointer(rq->xdp_prog))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
			xdp_return_frame_rx_napi(frame);
			drops++;
		}
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = n - drops;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += n - drops;
		rq->stats.vs.peer_tq_xdp_xmit_err += drops;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}

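/* Flush the XDP_TX bulk queue: up to VETH_XDP_TX_BULK_SIZE frames are
 * sent in one veth_xdp_xmit() call so the producer lock is taken once
 * per batch instead of once per frame.
 */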
static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
		for (i = 0; i < bq->count; i++)
			xdp_return_frame(bq->q[i]);
	}
	trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += bq->count - sent;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* xdp_ring is initialized on receive side? */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}

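/* Run the attached XDP program on one xdp_frame consumed from the ring
 * (i.e. a frame the peer sent via XDP_TX or ndo_xdp_xmit). On XDP_PASS
 * the frame is rebuilt into an skb for the regular stack.
 */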
static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
					struct xdp_frame *frame,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, &xdp);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(&xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = xdp_build_skb_from_frame(frame, rq->dev);
	if (!skb) {
		xdp_return_frame(frame);
		stats->rx_drops++;
	}

	return skb;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

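/* Run the attached XDP program on an skb queued by veth_xmit(). Shared,
 * cloned or nonlinear skbs, and skbs without XDP_PACKET_HEADROOM, are
 * first copied into a freshly allocated page so the program sees the
 * linear, writable buffer XDP requires; afterwards any head/tail
 * adjustments made by the program are folded back into the skb.
 */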
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	u32 pktlen, headroom, act, metalen, frame_sz;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_orphan(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
				      skb->len, PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	/* SKB "head" area always have tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);

	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0;

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
		struct sk_buff *skb;

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += frame->len;
			skb = veth_xdp_rcv_one(rq, frame, bq, stats);
		} else {
			skb = ptr;
			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
		}

		if (skb)
			napi_gro_receive(&rq->xdp_napi, skb);

		done++;
	}

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

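/* NAPI handler. The smp_store_mb() below pairs with the smp_mb() in
 * __veth_xdp_flush(): clearing rx_notify_masked before re-checking the
 * ring ensures a producer that raced with napi_complete_done() cannot
 * leave entries behind without a reschedule.
 */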
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	if (stats.xdp_redirect > 0)
		xdp_do_flush();
	xdp_clear_return_frame_no_direct();

	return done;
}

static int veth_napi_add(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
	}

	return 0;
err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

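/* Attach-time setup. The per-queue xdp_rxq registration and NAPI
 * contexts are created only once, keyed off queue 0 being registered;
 * afterwards the program pointer is published to every rx queue via
 * RCU.
 */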
static int veth_enable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		err = veth_napi_add(dev);
		if (err)
			goto err_rxq_reg;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		netif_napi_del(&rq->xdp_napi);
	}

	return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
	veth_napi_del(dev);
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}

	return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}

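/* Install or remove an XDP program. With a program attached the peer's
 * MTU is capped so that a worst-case frame still fits in a single page
 * together with VETH_XDP_HEADROOM and the trailing skb_shared_info,
 * and software GSO is masked out on the peer, since this XDP datapath
 * handles only linear, single-page buffers.
 */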
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001124static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1125 struct netlink_ext_ack *extack)
1126{
1127 struct veth_priv *priv = netdev_priv(dev);
1128 struct bpf_prog *old_prog;
1129 struct net_device *peer;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001130 unsigned int max_mtu;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001131 int err;
1132
1133 old_prog = priv->_xdp_prog;
1134 priv->_xdp_prog = prog;
1135 peer = rtnl_dereference(priv->peer);
1136
1137 if (prog) {
1138 if (!peer) {
1139 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1140 err = -ENOTCONN;
1141 goto err;
1142 }
1143
Toshiaki Makitadc224822018-08-03 16:58:11 +09001144 max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
1145 peer->hard_header_len -
1146 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1147 if (peer->mtu > max_mtu) {
1148 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1149 err = -ERANGE;
1150 goto err;
1151 }
1152
Toshiaki Makita638264d2018-08-03 16:58:18 +09001153 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1154 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1155 err = -ENOSPC;
1156 goto err;
1157 }
1158
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001159 if (dev->flags & IFF_UP) {
1160 err = veth_enable_xdp(dev);
1161 if (err) {
1162 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1163 goto err;
1164 }
1165 }
Toshiaki Makitadc224822018-08-03 16:58:11 +09001166
1167 if (!old_prog) {
1168 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1169 peer->max_mtu = max_mtu;
1170 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001171 }
1172
1173 if (old_prog) {
Toshiaki Makitadc224822018-08-03 16:58:11 +09001174 if (!prog) {
1175 if (dev->flags & IFF_UP)
1176 veth_disable_xdp(dev);
1177
1178 if (peer) {
1179 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1180 peer->max_mtu = ETH_MAX_MTU;
1181 }
1182 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001183 bpf_prog_put(old_prog);
1184 }
1185
Toshiaki Makitadc224822018-08-03 16:58:11 +09001186 if ((!!old_prog ^ !!prog) && peer)
1187 netdev_update_features(peer);
1188
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001189 return 0;
1190err:
1191 priv->_xdp_prog = old_prog;
1192
1193 return err;
1194}
1195
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001196static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1197{
1198 switch (xdp->command) {
1199 case XDP_SETUP_PROG:
1200 return veth_xdp_set(dev, xdp->prog, xdp->extack);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001201 default:
1202 return -EINVAL;
1203 }
1204}
1205
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001206static const struct net_device_ops veth_netdev_ops = {
Daniel Lezcanoee923622009-02-22 00:04:45 -08001207 .ndo_init = veth_dev_init,
1208 .ndo_open = veth_open,
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001209 .ndo_stop = veth_close,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001210 .ndo_start_xmit = veth_xmit,
stephen hemminger6311cc42011-06-08 14:53:59 +00001211 .ndo_get_stats64 = veth_get_stats64,
Gao feng5c70ef82013-10-04 16:52:24 +08001212 .ndo_set_rx_mode = veth_set_multicast_list,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001213 .ndo_set_mac_address = eth_mac_addr,
WANG Congbb446c12014-06-23 15:36:02 -07001214#ifdef CONFIG_NET_POLL_CONTROLLER
1215 .ndo_poll_controller = veth_poll_controller,
1216#endif
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001217 .ndo_get_iflink = veth_get_iflink,
Toshiaki Makitadc224822018-08-03 16:58:11 +09001218 .ndo_fix_features = veth_fix_features,
Toshiaki Makita1a04a822015-07-31 15:03:25 +09001219 .ndo_features_check = passthru_features_check,
Paolo Abeni163e5292016-02-26 10:45:41 +01001220 .ndo_set_rx_headroom = veth_set_rx_headroom,
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001221 .ndo_bpf = veth_xdp,
Lorenzo Bianconi9152cff2020-03-19 17:41:28 +01001222 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
Daniel Borkmann9aa12062020-10-11 01:40:02 +02001223 .ndo_get_peer_dev = veth_peer_dev,
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001224};
1225
Alexander Duyck732912d72016-04-19 14:02:26 -04001226#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
Xin Longc80fafb2016-08-25 13:21:49 +08001227 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
Alexander Duyck732912d72016-04-19 14:02:26 -04001228 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
Patrick McHardy28d2b132013-04-19 02:04:32 +00001229 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1230 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
Eric Dumazet80933152012-12-29 16:26:10 +00001231
static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

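/* The ops below are typically exercised from userspace via iproute2,
 * e.g.:
 *
 *	ip link add veth0 type veth peer name veth1
 *	ip link set veth1 netns some-namespace
 *
 * The first command reaches veth_newlink() with data[VETH_INFO_PEER]
 * describing the second device of the pair.
 */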
static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

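/* Create both devices of the pair in one operation: the peer is built
 * from the VETH_INFO_PEER payload (falling back to dev's attributes
 * when none is given) and registered first, then dev itself.
 */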
static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note that since we've registered a new device, dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

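/* Deleting either end tears down the whole pair: both devices are
 * queued for unregistration, so e.g. "ip link del veth0" also removes
 * its peer.
 */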
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note: dellink() is called from default_device_exit_batch(),
	 * before an RCU synchronization point. The devices are guaranteed
	 * not to be freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

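/* VETH_INFO_PEER carries a struct ifinfomsg header followed by the
 * peer's own IFLA_* attributes; the policy enforces only the minimum
 * length, the remainder is parsed by hand in veth_newlink().
 */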
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

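/* .kind is the string userspace passes as "type veth"; .maxtype and
 * .policy govern validation of the IFLA_INFO_DATA attributes handed
 * to veth_validate() and veth_newlink().
 */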
static struct rtnl_link_ops veth_link_ops = {
	.kind         = DRV_NAME,
	.priv_size    = sizeof(struct veth_priv),
	.setup        = veth_setup,
	.validate     = veth_validate,
	.newlink      = veth_newlink,
	.dellink      = veth_dellink,
	.policy       = veth_policy,
	.maxtype      = VETH_INFO_MAX,
	.get_link_net = veth_get_link_net,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);