// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */
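
/*
 * A veth device always exists as one half of a pair: frames transmitted
 * on one device show up as received frames on its peer. For illustration
 * only (userspace, not part of this driver), a pair is typically created
 * with iproute2:
 *
 *	ip link add veth0 type veth peer name veth1
 */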

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
#include <linux/module.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define VETH_XDP_FLAG		BIT(0)
#define VETH_RING_SIZE		256
#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)

#define VETH_XDP_TX_BULK_SIZE	16
#define VETH_XDP_BATCH		16

struct veth_stats {
	u64	rx_drops;
	/* xdp */
	u64	xdp_packets;
	u64	xdp_bytes;
	u64	xdp_redirect;
	u64	xdp_drops;
	u64	xdp_tx;
	u64	xdp_tx_err;
	u64	peer_tq_xdp_xmit;
	u64	peer_tq_xdp_xmit_err;
};

struct veth_rq_stats {
	struct veth_stats	vs;
	struct u64_stats_sync	syncp;
};

struct veth_rq {
	struct napi_struct	xdp_napi;
	struct net_device	*dev;
	struct bpf_prog __rcu	*xdp_prog;
	struct xdp_mem_info	xdp_mem;
	struct veth_rq_stats	stats;
	bool			rx_notify_masked;
	struct ptr_ring		xdp_ring;
	struct xdp_rxq_info	xdp_rxq;
};

struct veth_priv {
	struct net_device __rcu	*peer;
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;
	unsigned int		requested_headroom;
};

struct veth_xdp_tx_bq {
	struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
	unsigned int count;
};

/*
 * ethtool interface
 */

struct veth_q_stat_desc {
	char	desc[ETH_GSTRING_LEN];
	size_t	offset;
};

#define VETH_RQ_STAT(m)	offsetof(struct veth_stats, m)

static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
	{ "xdp_packets",	VETH_RQ_STAT(xdp_packets) },
	{ "xdp_bytes",		VETH_RQ_STAT(xdp_bytes) },
	{ "drops",		VETH_RQ_STAT(rx_drops) },
	{ "xdp_redirect",	VETH_RQ_STAT(xdp_redirect) },
	{ "xdp_drops",		VETH_RQ_STAT(xdp_drops) },
	{ "xdp_tx",		VETH_RQ_STAT(xdp_tx) },
	{ "xdp_tx_errors",	VETH_RQ_STAT(xdp_tx_err) },
};

#define VETH_RQ_STATS_LEN	ARRAY_SIZE(veth_rq_stats_desc)

static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
	{ "xdp_xmit",		VETH_RQ_STAT(peer_tq_xdp_xmit) },
	{ "xdp_xmit_errors",	VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
};

#define VETH_TQ_STATS_LEN	ARRAY_SIZE(veth_tq_stats_desc)

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

static int veth_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	char *p = (char *)buf;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		p += sizeof(ethtool_stats_keys);
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "rx_queue_%u_%.18s",
					 i, veth_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < dev->real_num_tx_queues; i++) {
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN,
					 "tx_queue_%u_%.18s",
					 i, veth_tq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys) +
		       VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
		       VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
	default:
		return -EOPNOTSUPP;
	}
}

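/*
 * ethtool stats layout: slot 0 is the peer ifindex, followed by this
 * device's per-rx-queue counters, followed by "tx" counters that are
 * really the peer's per-rx-queue xdp_xmit counters folded onto this
 * device's tx queues (see the i % real_num_tx_queues mapping below).
 */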
static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int i, j, idx;

	data[0] = peer ? peer->ifindex : 0;
	idx = 1;
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
		const void *stats_base = (void *)&rq_stats->vs;
		unsigned int start;
		size_t offset;

		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
				offset = veth_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
		idx += VETH_RQ_STATS_LEN;
	}

	if (!peer)
		return;

	rcv_priv = netdev_priv(peer);
	for (i = 0; i < peer->real_num_rx_queues; i++) {
		const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
		const void *base = (void *)&rq_stats->vs;
		unsigned int start, tx_idx = idx;
		size_t offset;

		tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
		do {
			start = u64_stats_fetch_begin_irq(&rq_stats->syncp);
			for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
				offset = veth_tq_stats_desc[j].offset;
				data[tx_idx + j] += *(u64 *)(base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start));
	}
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
	.get_link_ksettings	= veth_get_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/* general routines */

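/*
 * Both sk_buffs (from ndo_start_xmit) and xdp_frames (from ndo_xdp_xmit)
 * travel through the same per-queue ptr_ring. Since both pointer types
 * are at least word aligned, the low bit (VETH_XDP_FLAG) is free and is
 * used to tag which type a ring entry carries.
 */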
static bool veth_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VETH_XDP_FLAG;
}

static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}

static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}

static void veth_ptr_free(void *ptr)
{
	if (veth_is_xdp_frame(ptr))
		xdp_return_frame(veth_ptr_to_xdp(ptr));
	else
		kfree_skb(ptr);
}

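/*
 * rx_notify_masked suppresses redundant NAPI wakeups while the poller is
 * already scheduled; the smp_mb() here pairs with the smp_store_mb() in
 * veth_poll() so a producer never misses a rescheduling window.
 */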
static void __veth_xdp_flush(struct veth_rq *rq)
{
	/* Write ptr_ring before reading rx_notify_masked */
	smp_mb();
	if (!rq->rx_notify_masked) {
		rq->rx_notify_masked = true;
		napi_schedule(&rq->xdp_napi);
	}
}

static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
{
	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}

static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct veth_rq *rq, bool xdp)
{
	return __dev_forward_skb(dev, skb) ?: xdp ?
		veth_xdp_rx(rq, skb) :
		netif_rx(skb);
}

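/*
 * Transmit path: a frame sent on one end of the pair is delivered
 * directly to the peer. If the peer has an XDP program attached, the
 * skb is queued onto the peer's per-queue xdp_ring and handled in NAPI
 * context; otherwise it goes through plain netif_rx().
 */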
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	struct veth_rq *rq = NULL;
	struct net_device *rcv;
	int length = skb->len;
	bool rcv_xdp = false;
	int rxq;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv)) {
		kfree_skb(skb);
		goto drop;
	}

	rcv_priv = netdev_priv(rcv);
	rxq = skb_get_queue_mapping(skb);
	if (rxq < rcv->real_num_rx_queues) {
		rq = &rcv_priv->rq[rxq];
		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
		if (rcv_xdp)
			skb_record_rx_queue(skb, rxq);
	}

	skb_tx_timestamp(skb);
	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
		if (!rcv_xdp)
			dev_lstats_add(dev, length);
	} else {
drop:
		atomic64_inc(&priv->dropped);
	}

	if (rcv_xdp)
		__veth_xdp_flush(rq);

	rcu_read_unlock();

	return NETDEV_TX_OK;
}

static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
{
	struct veth_priv *priv = netdev_priv(dev);

	dev_lstats_read(dev, packets, bytes);
	return atomic64_read(&priv->dropped);
}

static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	result->peer_tq_xdp_xmit_err = 0;
	result->xdp_packets = 0;
	result->xdp_tx_err = 0;
	result->xdp_bytes = 0;
	result->rx_drops = 0;
	for (i = 0; i < dev->num_rx_queues; i++) {
		u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
		struct veth_rq_stats *stats = &priv->rq[i].stats;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
			xdp_tx_err = stats->vs.xdp_tx_err;
			packets = stats->vs.xdp_packets;
			bytes = stats->vs.xdp_bytes;
			drops = stats->vs.rx_drops;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
		result->xdp_tx_err += xdp_tx_err;
		result->xdp_packets += packets;
		result->xdp_bytes += bytes;
		result->rx_drops += drops;
	}
}

static void veth_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *tot)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct veth_stats rx;
	u64 packets, bytes;

	tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
	tot->tx_bytes = bytes;
	tot->tx_packets = packets;

	veth_stats_rx(&rx, dev);
	tot->tx_dropped += rx.xdp_tx_err;
	tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
	tot->rx_bytes = rx.xdp_bytes;
	tot->rx_packets = rx.xdp_packets;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (peer) {
		veth_stats_tx(peer, &packets, &bytes);
		tot->rx_bytes += bytes;
		tot->rx_packets += packets;

		veth_stats_rx(&rx, peer);
		tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
		tot->rx_dropped += rx.xdp_tx_err;
		tot->tx_bytes += rx.xdp_bytes;
		tot->tx_packets += rx.xdp_packets;
	}
	rcu_read_unlock();
}

/* fake multicast ability */
static void veth_set_multicast_list(struct net_device *dev)
{
}

static struct sk_buff *veth_build_skb(void *head, int headroom, int len,
				      int buflen)
{
	struct sk_buff *skb;

	skb = build_skb(head, buflen);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

static int veth_select_rxq(struct net_device *dev)
{
	return smp_processor_id() % dev->real_num_rx_queues;
}

static struct net_device *veth_peer_dev(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	/* Callers must be under RCU read side. */
	return rcu_dereference(priv->peer);
}

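/*
 * Common xdp_xmit helper, used both by the ndo_xdp_xmit entry point and
 * by the internal XDP_TX bulk flush. Frames are pushed straight into the
 * peer's xdp_ring; a frame larger than the peer's MTU budget, or a full
 * ring, stops the loop and leaves the remaining frames to the caller.
 * Returns the number of frames enqueued.
 */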
static int veth_xdp_xmit(struct net_device *dev, int n,
			 struct xdp_frame **frames,
			 u32 flags, bool ndo_xmit)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
	int i, ret = -ENXIO, nxmit = 0;
	struct net_device *rcv;
	unsigned int max_len;
	struct veth_rq *rq;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* Non-NULL xdp_prog ensures that xdp_ring is initialized on receive
	 * side. This means an XDP program is loaded on the peer and the peer
	 * device is up.
	 */
	if (!rcu_access_pointer(rq->xdp_prog))
		goto out;

	max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;

	spin_lock(&rq->xdp_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *frame = frames[i];
		void *ptr = veth_xdp_to_ptr(frame);

		if (unlikely(frame->len > max_len ||
			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
			break;
		nxmit++;
	}
	spin_unlock(&rq->xdp_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__veth_xdp_flush(rq);

	ret = nxmit;
	if (ndo_xmit) {
		u64_stats_update_begin(&rq->stats.syncp);
		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
		u64_stats_update_end(&rq->stats.syncp);
	}

out:
	rcu_read_unlock();

	return ret;
}

static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
			     struct xdp_frame **frames, u32 flags)
{
	int err;

	err = veth_xdp_xmit(dev, n, frames, flags, true);
	if (err < 0) {
		struct veth_priv *priv = netdev_priv(dev);

		atomic64_add(n, &priv->dropped);
	}

	return err;
}

static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	int sent, i, err = 0, drops;

	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
	if (sent < 0) {
		err = sent;
		sent = 0;
	}

	for (i = sent; unlikely(i < bq->count); i++)
		xdp_return_frame(bq->q[i]);

	drops = bq->count - sent;
	trace_xdp_bulk_tx(rq->dev, sent, drops, err);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_tx += sent;
	rq->stats.vs.xdp_tx_err += drops;
	u64_stats_update_end(&rq->stats.syncp);

	bq->count = 0;
}

static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
{
	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
	struct net_device *rcv;
	struct veth_rq *rcv_rq;

	rcu_read_lock();
	veth_xdp_flush_bq(rq, bq);
	rcv = rcu_dereference(priv->peer);
	if (unlikely(!rcv))
		goto out;

	rcv_priv = netdev_priv(rcv);
	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
	/* the peer's xdp_ring is only initialized when it has an XDP program attached */
	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
		goto out;

	__veth_xdp_flush(rcv_rq);
out:
	rcu_read_unlock();
}

static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
		       struct veth_xdp_tx_bq *bq)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
		veth_xdp_flush_bq(rq, bq);

	bq->q[bq->count++] = frame;

	return 0;
}

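/*
 * Run the attached XDP program on one xdp_frame consumed from the ring.
 * Returns the (possibly updated) frame on XDP_PASS so the caller can
 * batch it into an skb, or NULL when the frame was consumed by XDP_TX,
 * XDP_REDIRECT or a drop.
 */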
static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
					  struct xdp_frame *frame,
					  struct veth_xdp_tx_bq *bq,
					  struct veth_stats *stats)
{
	struct xdp_frame orig_frame;
	struct bpf_prog *xdp_prog;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (likely(xdp_prog)) {
		struct xdp_buff xdp;
		u32 act;

		xdp_convert_frame_to_buff(frame, &xdp);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			if (xdp_update_frame_from_buff(&xdp, frame))
				goto err_xdp;
			break;
		case XDP_TX:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
				trace_xdp_exception(rq->dev, xdp_prog, act);
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_tx++;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			orig_frame = *frame;
			xdp.rxq->mem = frame->mem;
			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
				frame = &orig_frame;
				stats->rx_drops++;
				goto err_xdp;
			}
			stats->xdp_redirect++;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(rq->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			stats->xdp_drops++;
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	return frame;
err_xdp:
	rcu_read_unlock();
	xdp_return_frame(frame);
xdp_xmit:
	return NULL;
}

/* the frames array holds at most VETH_XDP_BATCH entries */
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
				  int n_xdpf, struct veth_xdp_tx_bq *bq,
				  struct veth_stats *stats)
{
	void *skbs[VETH_XDP_BATCH];
	int i;

	if (xdp_alloc_skb_bulk(skbs, n_xdpf,
			       GFP_ATOMIC | __GFP_ZERO) < 0) {
		for (i = 0; i < n_xdpf; i++)
			xdp_return_frame(frames[i]);
		stats->rx_drops += n_xdpf;

		return;
	}

	for (i = 0; i < n_xdpf; i++) {
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(frames[i], skb,
						 rq->dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			stats->rx_drops++;
			continue;
		}
		napi_gro_receive(&rq->xdp_napi, skb);
	}
}

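/*
 * Run XDP on an skb that arrived via ndo_start_xmit. XDP needs a linear,
 * private buffer with XDP_PACKET_HEADROOM in front, so shared, cloned or
 * fragmented skbs (or ones with too little headroom) are first copied
 * into a freshly allocated page.
 */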
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
					struct sk_buff *skb,
					struct veth_xdp_tx_bq *bq,
					struct veth_stats *stats)
{
	u32 pktlen, headroom, act, metalen, frame_sz;
	void *orig_data, *orig_data_end;
	struct bpf_prog *xdp_prog;
	int mac_len, delta, off;
	struct xdp_buff xdp;

	skb_orphan(skb);

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (unlikely(!xdp_prog)) {
		rcu_read_unlock();
		goto out;
	}

	mac_len = skb->data - skb_mac_header(skb);
	pktlen = skb->len + mac_len;
	headroom = skb_headroom(skb) - mac_len;

	if (skb_shared(skb) || skb_head_is_locked(skb) ||
	    skb_is_nonlinear(skb) || headroom < XDP_PACKET_HEADROOM) {
		struct sk_buff *nskb;
		int size, head_off;
		void *head, *start;
		struct page *page;

		size = SKB_DATA_ALIGN(VETH_XDP_HEADROOM + pktlen) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		if (size > PAGE_SIZE)
			goto drop;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page)
			goto drop;

		head = page_address(page);
		start = head + VETH_XDP_HEADROOM;
		if (skb_copy_bits(skb, -mac_len, start, pktlen)) {
			page_frag_free(head);
			goto drop;
		}

		nskb = veth_build_skb(head, VETH_XDP_HEADROOM + mac_len,
				      skb->len, PAGE_SIZE);
		if (!nskb) {
			page_frag_free(head);
			goto drop;
		}

		skb_copy_header(nskb, skb);
		head_off = skb_headroom(nskb) - skb_headroom(skb);
		skb_headers_offset_update(nskb, head_off);
		consume_skb(skb);
		skb = nskb;
	}

	/* the SKB "head" area always has tailroom for skb_shared_info */
	frame_sz = skb_end_pointer(skb) - skb->head;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	xdp_init_buff(&xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, skb->head, skb->mac_header, pktlen, true);

	orig_data = xdp.data;
	orig_data_end = xdp.data_end;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
			trace_xdp_exception(rq->dev, xdp_prog, act);
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_tx++;
		rcu_read_unlock();
		goto xdp_xmit;
	case XDP_REDIRECT:
		get_page(virt_to_page(xdp.data));
		consume_skb(skb);
		xdp.rxq->mem = rq->xdp_mem;
		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
			stats->rx_drops++;
			goto err_xdp;
		}
		stats->xdp_redirect++;
		rcu_read_unlock();
		goto xdp_xmit;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rq->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		stats->xdp_drops++;
		goto xdp_drop;
	}
	rcu_read_unlock();

	/* check if bpf_xdp_adjust_head was used */
	delta = orig_data - xdp.data;
	off = mac_len + delta;
	if (off > 0)
		__skb_push(skb, off);
	else if (off < 0)
		__skb_pull(skb, -off);
	skb->mac_header -= delta;

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp.data_end - orig_data_end;
	if (off != 0)
		__skb_put(skb, off); /* positive on grow, negative on shrink */
	skb->protocol = eth_type_trans(skb, rq->dev);

	metalen = xdp.data - xdp.data_meta;
	if (metalen)
		skb_metadata_set(skb, metalen);
out:
	return skb;
drop:
	stats->rx_drops++;
xdp_drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
err_xdp:
	rcu_read_unlock();
	page_frag_free(xdp.data);
xdp_xmit:
	return NULL;
}

static int veth_xdp_rcv(struct veth_rq *rq, int budget,
			struct veth_xdp_tx_bq *bq,
			struct veth_stats *stats)
{
	int i, done = 0, n_xdpf = 0;
	void *xdpf[VETH_XDP_BATCH];

	for (i = 0; i < budget; i++) {
		void *ptr = __ptr_ring_consume(&rq->xdp_ring);

		if (!ptr)
			break;

		if (veth_is_xdp_frame(ptr)) {
			/* ndo_xdp_xmit */
			struct xdp_frame *frame = veth_ptr_to_xdp(ptr);

			stats->xdp_bytes += frame->len;
			frame = veth_xdp_rcv_one(rq, frame, bq, stats);
			if (frame) {
				/* XDP_PASS */
				xdpf[n_xdpf++] = frame;
				if (n_xdpf == VETH_XDP_BATCH) {
					veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
							      bq, stats);
					n_xdpf = 0;
				}
			}
		} else {
			/* ndo_start_xmit */
			struct sk_buff *skb = ptr;

			stats->xdp_bytes += skb->len;
			skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
			if (skb)
				napi_gro_receive(&rq->xdp_napi, skb);
		}
		done++;
	}

	if (n_xdpf)
		veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.vs.xdp_redirect += stats->xdp_redirect;
	rq->stats.vs.xdp_bytes += stats->xdp_bytes;
	rq->stats.vs.xdp_drops += stats->xdp_drops;
	rq->stats.vs.rx_drops += stats->rx_drops;
	rq->stats.vs.xdp_packets += done;
	u64_stats_update_end(&rq->stats.syncp);

	return done;
}

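/*
 * NAPI poll handler: drain up to @budget entries from the xdp_ring, then
 * re-enable producer notifications. The extra ring-emptiness check after
 * napi_complete_done() closes the race with a producer that ran before
 * rx_notify_masked was cleared.
 */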
static int veth_poll(struct napi_struct *napi, int budget)
{
	struct veth_rq *rq =
		container_of(napi, struct veth_rq, xdp_napi);
	struct veth_stats stats = {};
	struct veth_xdp_tx_bq bq;
	int done;

	bq.count = 0;

	xdp_set_return_frame_no_direct();
	done = veth_xdp_rcv(rq, budget, &bq, &stats);

	if (done < budget && napi_complete_done(napi, done)) {
		/* Write rx_notify_masked before reading ptr_ring */
		smp_store_mb(rq->rx_notify_masked, false);
		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
			rq->rx_notify_masked = true;
			napi_schedule(&rq->xdp_napi);
		}
	}

	if (stats.xdp_tx > 0)
		veth_xdp_flush(rq, &bq);
	if (stats.xdp_redirect > 0)
		xdp_do_flush();
	xdp_clear_return_frame_no_direct();

	return done;
}

static int veth_napi_add(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
		if (err)
			goto err_xdp_ring;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_enable(&rq->xdp_napi);
	}

	return 0;
err_xdp_ring:
	for (i--; i >= 0; i--)
		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);

	return err;
}

static void veth_napi_del(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		napi_disable(&rq->xdp_napi);
		__netif_napi_del(&rq->xdp_napi);
	}
	synchronize_net();

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->rx_notify_masked = false;
		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
	}
}

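/*
 * Attaching a program registers an xdp_rxq_info and a NAPI context for
 * every real rx queue; this only happens on the first attach (detected
 * via the queue-0 registration) and is torn down in veth_disable_xdp().
 */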
static int veth_enable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int err, i;

	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
		for (i = 0; i < dev->real_num_rx_queues; i++) {
			struct veth_rq *rq = &priv->rq[i];

			netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
			if (err < 0)
				goto err_rxq_reg;

			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err < 0)
				goto err_reg_mem;

			/* Save original mem info as it can be overwritten */
			rq->xdp_mem = rq->xdp_rxq.mem;
		}

		err = veth_napi_add(dev);
		if (err)
			goto err_rxq_reg;
	}

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);

	return 0;
err_reg_mem:
	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
err_rxq_reg:
	for (i--; i >= 0; i--) {
		struct veth_rq *rq = &priv->rq[i];

		xdp_rxq_info_unreg(&rq->xdp_rxq);
		netif_napi_del(&rq->xdp_napi);
	}

	return err;
}

static void veth_disable_xdp(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < dev->real_num_rx_queues; i++)
		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
	veth_napi_del(dev);
	for (i = 0; i < dev->real_num_rx_queues; i++) {
		struct veth_rq *rq = &priv->rq[i];

		rq->xdp_rxq.mem = rq->xdp_mem;
		xdp_rxq_info_unreg(&rq->xdp_rxq);
	}
}

static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);
	int err;

	if (!peer)
		return -ENOTCONN;

	if (priv->_xdp_prog) {
		err = veth_enable_xdp(dev);
		if (err)
			return err;
	}

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}

	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	if (priv->_xdp_prog)
		veth_disable_xdp(dev);

	return 0;
}

static int is_valid_veth_mtu(int mtu)
{
	return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
}

static int veth_alloc_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	int i;

	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
	if (!priv->rq)
		return -ENOMEM;

	for (i = 0; i < dev->num_rx_queues; i++) {
		priv->rq[i].dev = dev;
		u64_stats_init(&priv->rq[i].stats.syncp);
	}

	return 0;
}

static void veth_free_queues(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	kfree(priv->rq);
}

static int veth_dev_init(struct net_device *dev)
{
	int err;

	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;

	err = veth_alloc_queues(dev);
	if (err) {
		free_percpu(dev->lstats);
		return err;
	}

	return 0;
}

static void veth_dev_free(struct net_device *dev)
{
	veth_free_queues(dev);
	free_percpu(dev->lstats);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void veth_poll_controller(struct net_device *dev)
{
	/* veth only receives frames when its peer sends one.
	 * Since it has nothing to do with disabling irqs, we are guaranteed
	 * never to have pending data when we poll for it, so
	 * there is nothing to do here.
	 *
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int veth_get_iflink(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static netdev_features_t veth_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	peer = rtnl_dereference(priv->peer);
	if (peer) {
		struct veth_priv *peer_priv = netdev_priv(peer);

		if (peer_priv->_xdp_prog)
			features &= ~NETIF_F_GSO_SOFTWARE;
	}

	return features;
}

static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct veth_priv *peer_priv, *priv = netdev_priv(dev);
	struct net_device *peer;

	if (new_hr < 0)
		new_hr = 0;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer))
		goto out;

	peer_priv = netdev_priv(peer);
	priv->requested_headroom = new_hr;
	new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
	dev->needed_headroom = new_hr;
	peer->needed_headroom = new_hr;

out:
	rcu_read_unlock();
}

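/*
 * Validate and install a new XDP program. The peer's MTU must leave room
 * for VETH_XDP_HEADROOM plus the skb_shared_info tail inside one page,
 * and this device needs at least as many rx queues as the peer has tx
 * queues, since ndo_start_xmit maps tx queue to peer rx queue one to one.
 */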
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001164static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1165 struct netlink_ext_ack *extack)
1166{
1167 struct veth_priv *priv = netdev_priv(dev);
1168 struct bpf_prog *old_prog;
1169 struct net_device *peer;
Toshiaki Makitadc224822018-08-03 16:58:11 +09001170 unsigned int max_mtu;
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001171 int err;
1172
1173 old_prog = priv->_xdp_prog;
1174 priv->_xdp_prog = prog;
1175 peer = rtnl_dereference(priv->peer);
1176
1177 if (prog) {
1178 if (!peer) {
1179 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1180 err = -ENOTCONN;
1181 goto err;
1182 }
1183
Toshiaki Makitadc224822018-08-03 16:58:11 +09001184 max_mtu = PAGE_SIZE - VETH_XDP_HEADROOM -
1185 peer->hard_header_len -
1186 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1187 if (peer->mtu > max_mtu) {
1188 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1189 err = -ERANGE;
1190 goto err;
1191 }
1192
Toshiaki Makita638264d2018-08-03 16:58:18 +09001193 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1194 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1195 err = -ENOSPC;
1196 goto err;
1197 }
1198
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001199 if (dev->flags & IFF_UP) {
1200 err = veth_enable_xdp(dev);
1201 if (err) {
1202 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1203 goto err;
1204 }
1205 }
Toshiaki Makitadc224822018-08-03 16:58:11 +09001206
1207 if (!old_prog) {
1208 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1209 peer->max_mtu = max_mtu;
1210 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001211 }
1212
1213 if (old_prog) {
Toshiaki Makitadc224822018-08-03 16:58:11 +09001214 if (!prog) {
1215 if (dev->flags & IFF_UP)
1216 veth_disable_xdp(dev);
1217
1218 if (peer) {
1219 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1220 peer->max_mtu = ETH_MAX_MTU;
1221 }
1222 }
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001223 bpf_prog_put(old_prog);
1224 }
1225
Toshiaki Makitadc224822018-08-03 16:58:11 +09001226 if ((!!old_prog ^ !!prog) && peer)
1227 netdev_update_features(peer);
1228
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001229 return 0;
1230err:
1231 priv->_xdp_prog = old_prog;
1232
1233 return err;
1234}
1235
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001236static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1237{
1238 switch (xdp->command) {
1239 case XDP_SETUP_PROG:
1240 return veth_xdp_set(dev, xdp->prog, xdp->extack);
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001241 default:
1242 return -EINVAL;
1243 }
1244}
1245
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001246static const struct net_device_ops veth_netdev_ops = {
Daniel Lezcanoee923622009-02-22 00:04:45 -08001247 .ndo_init = veth_dev_init,
1248 .ndo_open = veth_open,
Eric W. Biederman2cf48a12009-02-25 19:47:29 +00001249 .ndo_stop = veth_close,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001250 .ndo_start_xmit = veth_xmit,
stephen hemminger6311cc42011-06-08 14:53:59 +00001251 .ndo_get_stats64 = veth_get_stats64,
Gao feng5c70ef82013-10-04 16:52:24 +08001252 .ndo_set_rx_mode = veth_set_multicast_list,
Daniel Lezcanoee923622009-02-22 00:04:45 -08001253 .ndo_set_mac_address = eth_mac_addr,
WANG Congbb446c12014-06-23 15:36:02 -07001254#ifdef CONFIG_NET_POLL_CONTROLLER
1255 .ndo_poll_controller = veth_poll_controller,
1256#endif
Nicolas Dichtela45253b2015-04-02 17:07:11 +02001257 .ndo_get_iflink = veth_get_iflink,
Toshiaki Makitadc224822018-08-03 16:58:11 +09001258 .ndo_fix_features = veth_fix_features,
Toshiaki Makita1a04a822015-07-31 15:03:25 +09001259 .ndo_features_check = passthru_features_check,
Paolo Abeni163e5292016-02-26 10:45:41 +01001260 .ndo_set_rx_headroom = veth_set_rx_headroom,
Toshiaki Makita948d4f22018-08-03 16:58:10 +09001261 .ndo_bpf = veth_xdp,
Lorenzo Bianconi9152cff2020-03-19 17:41:28 +01001262 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
Daniel Borkmann9aa12062020-10-11 01:40:02 +02001263 .ndo_get_peer_dev = veth_peer_dev,
Stephen Hemminger4456e7b2008-11-19 21:50:10 -08001264};
1265
Alexander Duyck732912d72016-04-19 14:02:26 -04001266#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
Xin Longc80fafb2016-08-25 13:21:49 +08001267 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
Alexander Duyck732912d72016-04-19 14:02:26 -04001268 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
Patrick McHardy28d2b132013-04-19 02:04:32 +00001269 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1270 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
Eric Dumazet80933152012-12-29 16:26:10 +00001271
static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->features |= VETH_FEATURES;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_STAG_RX);
	dev->needs_free_netdev = true;
	dev->priv_destructor = veth_dev_free;
	dev->max_mtu = ETH_MAX_MTU;

	dev->hw_features = VETH_FEATURES;
	dev->hw_enc_features = VETH_FEATURES;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
}

/*
 * netlink interface
 */

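/* Userspace view of this interface, for illustration: a pair is created
 * and one end moved to another network namespace with e.g.
 *
 *	ip link add veth0 type veth peer name veth1
 *	ip link set veth1 netns blue
 *
 * "veth0", "veth1" and "blue" are placeholder names. The attributes after
 * "peer" travel nested in VETH_INFO_PEER and reach veth_newlink() below as
 * data[VETH_INFO_PEER].
 */
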
static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

static int veth_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL, extack);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, name_assign_type,
				&veth_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (!ifmp || !tbp[IFLA_ADDRESS])
		eth_hw_addr_random(peer);

	if (ifmp && (dev->ifindex != 0))
		peer->ifindex = ifmp->ifi_index;

	peer->gso_max_size = dev->gso_max_size;
	peer->gso_max_segs = dev->gso_max_segs;

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * Note that since we've just registered a new device, dev's name
	 * should be re-allocated.
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the devices together
	 */

	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

err_register_dev:
	/* nothing to do */
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

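/* rtnl .dellink handler: deleting either end of a pair removes both
 * devices; they are queued on the same unregistration list so they go
 * away in one batch.
 */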
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not to be freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

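/* VETH_INFO_PEER carries a struct ifinfomsg followed by the peer's own
 * IFLA_* attributes; the policy can only check the minimum length here,
 * the nested attributes are parsed by hand in veth_newlink().
 */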
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
	[VETH_INFO_PEER]	= { .len = sizeof(struct ifinfomsg) },
};

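/* Report the peer's netns so "ip link" can print a link-netnsid for pairs
 * whose ends live in different namespaces.
 */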
static struct net *veth_get_link_net(const struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
	.get_link_net	= veth_get_link_net,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
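
/* MODULE_ALIAS_RTNL_LINK(DRV_NAME) registers the "rtnl-link-veth" alias,
 * so the first "ip link add ... type veth" autoloads this module via
 * request_module() with no explicit modprobe needed.
 */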